I am getting an error with code 23 and type 19, which corresponds to AI_ERROR_CODE_INVALID_FORMAT. Are there any common mistakes I could be making?
I've followed the example in the Embedded Inference Client API documentation. However, when I run the model I get error code 23 with type 19, which is AI_ERROR_CODE_INVALID_FORMAT.
I've checked the tensor types with the following code, and both the input and the output return 1 for float_stat, which indicates that they are float-typed:
/* Extract format of the first input tensor (index 0) */
volatile const ai_buffer *ai_input_1 = &ai_input[0];
/* Extract format of the tensor */
fmt_1 = AI_BUFFER_FORMAT(ai_input_1);
//Extract the float value 1 = float type
float_stat = AI_BUFFER_FMT_GET_FLOAT(fmt_1);
I've included my code below in case there is something obvious. I have used the API reference accessible through CubeIDE, but it has not helped, and I am not aware of any other documentation or examples that might. I've removed the default generated code to make it easier to read.
The model is an ONNX file converted from a scikit-learn model. I was able to upload, analyse, graph, and validate it successfully, and I have set the component selection to the application template.
#include "main.h"
#include "string.h"
#include "app_x-cube-ai.h"
#include <stdio.h>
#include "model.h"
#include "model_data.h"
/* Global handle to reference the instantiated C-model.
 * Filled in by ai_model_create_and_init() in aiInit(). */
static ai_handle model = AI_HANDLE_NULL;
/* Global c-array to handle the activations buffer (scratch memory the
 * runtime uses during inference). Size comes from the generated
 * model_data.h. 32-byte alignment is required by the AI runtime. */
AI_ALIGNED(32)
static ai_u8 activations[AI_MODEL_DATA_ACTIVATIONS_SIZE];
/* Array to store the data of the input tensor.
 * AI_MODEL_IN_1_SIZE is the element count of input tensor #1. */
AI_ALIGNED(32)
static ai_float in_data[AI_MODEL_IN_1_SIZE];
/* or static ai_u8 in_data[AI_NETWORK_IN_1_SIZE_BYTES]; -- byte-sized
 * alternative for non-float (e.g. quantized) models. */
/* c-array to store the data of the output tensor. */
AI_ALIGNED(32)
static ai_float out_data[AI_MODEL_OUT_1_SIZE];
/* static ai_u8 out_data[AI_NETWORK_OUT_1_SIZE_BYTES]; -- byte-sized
 * alternative, as above. */
/* Pointers to the model's input/output ai_buffer descriptor arrays,
 * retrieved once in aiInit() and reused on every aiRun() call. */
static ai_buffer *ai_input;
static ai_buffer *ai_output;
/**
 * Instantiate and initialise the C-model.
 *
 * Binds the activations scratch buffer to the network, then retrieves
 * the input/output ai_buffer descriptors for later use by aiRun().
 *
 * On initialisation failure this traps in an infinite loop so the
 * error is visible under a debugger (embedded fatal-error pattern).
 *
 * @return 0 on success; never returns on failure.
 */
int aiInit(void) {
  ai_error err;

  /* Create and initialize the c-model */
  const ai_handle acts[] = { activations };
  err = ai_model_create_and_init(&model, acts, NULL);
  if (err.type != AI_ERROR_NONE) {
    /* Fatal: halt here; inspect 'err' in the debugger. */
    while (1) {}
  }

  /* Retrieve pointers to the model's input/output tensor descriptors */
  ai_input = ai_model_inputs_get(model, NULL);
  ai_output = ai_model_outputs_get(model, NULL);
  return 0;
}
/* File-scope inspection state, kept volatile so the compiler does not
 * optimise the stores away and the values can be read in a debugger
 * while the target is halted.
 * NOTE(review): volatile is a debugger aid only — it provides no
 * thread/interrupt safety. */
volatile ai_error err;                 /* last error reported by the runtime */
volatile ai_i32 n_batch;               /* batches processed by ai_model_run() */
volatile int float_stat;               /* 1 if input tensor format is float */
volatile ai_buffer_format fmt_1;       /* raw format word of input tensor 0 */
volatile int float_stat_out;           /* 1 if output tensor format is float */
volatile ai_buffer_format fmt_1_out;   /* raw format word of output tensor 0 */
/**
 * Run one inference pass.
 *
 * Points the model's I/O descriptors at the caller's buffers, records
 * the tensor formats into the file-scope debug variables, and invokes
 * the runtime.
 *
 * @param in_data   payload for input tensor 0 (read-only; must hold
 *                  AI_MODEL_IN_1_SIZE floats for this float model)
 * @param out_data  destination for output tensor 0
 * @return 0 on success, -1 if the runtime reported an error (details
 *         are captured in the file-scope 'err' variable).
 *
 * BUG FIX (review): the original function body was never closed — the
 * missing '}' before main() nested main inside aiRun, which does not
 * compile. Also, an inference failure previously still returned 0.
 */
int aiRun(const void *in_data, void *out_data) {
  /* 1 - Update IO handlers with the data payload.
   * NOTE(review): AI_HANDLE_PTR casts away const on in_data; the
   * runtime treats the input buffer as read-only. */
  ai_input[0].data = AI_HANDLE_PTR(in_data);
  ai_output[0].data = AI_HANDLE_PTR(out_data);

  /* Debug aid: capture the tensor formats.
   * AI_BUFFER_FMT_GET_FLOAT() == 1 means the tensor is float-typed. */
  volatile const ai_buffer *ai_input_1 = &ai_input[0];
  fmt_1 = AI_BUFFER_FORMAT(ai_input_1);
  float_stat = AI_BUFFER_FMT_GET_FLOAT(fmt_1);

  volatile const ai_buffer *ai_output_1 = &ai_output[0];
  fmt_1_out = AI_BUFFER_FORMAT(ai_output_1);
  float_stat_out = AI_BUFFER_FMT_GET_FLOAT(fmt_1_out);

  /* 2 - Perform the inference (one batch expected) */
  n_batch = ai_model_run(model, &ai_input[0], &ai_output[0]);
  if (n_batch != 1) {
    /* Record the error and surface the failure to the caller instead
     * of silently returning success. */
    err = ai_model_get_error(model);
    return -1;
  }
  return 0;
}
/**
 * Application entry point: initialise the model, then repeatedly copy
 * a fixed 4-element test vector into the input buffer and run
 * inference.
 */
int main(void)
{
  aiInit();

  /* Fixed test vector: 4 float features for the converted model. */
  volatile float input_buffer_fl[4] = { 0.02238, -0.06696, 0.00111, -0.12979 };

  while (1)
  {
    /* BUG FIX (review): the loop counter was declared uninitialized
     * ('int i;'), so reading it in 'i < 4' was undefined behavior and
     * the input buffer could be filled with garbage or not at all. */
    for (int i = 0; i < 4; i++) {
      in_data[i] = input_buffer_fl[i];
    }
    aiRun(in_data, out_data);
    MX_X_CUBE_AI_Process();
  }
}
