#ifndef AIALGO_SEQUENTIAL_INFERENCE
#define AIALGO_SEQUENTIAL_INFERENCE
void aialgo_quantize_model_f32_to_q7(aimodel_t *model_f32, aimodel_t *model_q7, aitensor_t *representative_dataset)
Quantize the model parameters (weights and biases).
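A minimal usage sketch (not taken from this listing) of the F32-to-Q7 quantization step. It assumes a trained F32 model and a structurally identical Q7 model that are both already compiled and supplied with memory, and it assumes the aitensor_t fields (dtype, dim, shape, data) and the aif32 dtype constant from the AIfES headers; the calibration data is purely illustrative.

    #include <stdint.h>
    #include "aifes.h"

    /* Assumed to exist: a trained F32 model and a matching Q7 model,
     * both already compiled and with parameter/inference memory assigned. */
    extern aimodel_t model_f32;
    extern aimodel_t model_q7;

    void quantize_to_q7(void)
    {
        /* Hypothetical calibration data covering the expected input range. */
        float repr_data[4 * 2] = {0.0f, 0.0f,  0.0f, 1.0f,  1.0f, 0.0f,  1.0f, 1.0f};
        uint16_t repr_shape[2] = {4, 2};

        aitensor_t representative_dataset;
        representative_dataset.dtype = aif32;
        representative_dataset.dim   = 2;
        representative_dataset.shape = repr_shape;
        representative_dataset.data  = repr_data;

        /* Derive the Q7 quantization parameters of the weights and biases from
         * the F32 model, calibrated on the representative dataset. */
        aialgo_quantize_model_f32_to_q7(&model_f32, &model_q7, &representative_dataset);
    }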
void aialgo_set_model_result_precision_q31(aimodel_t *model, uint16_t shift)
Initialize the quantization parameters of the layer results for the Q31 data type.
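A brief hedged sketch of this call, assuming a compiled Q31 model; the shift value of 24 is only an illustrative choice for the fixed-point precision of the intermediate results.

    #include "aifes.h"

    extern aimodel_t model_q31; /* assumed: a compiled Q31 model */

    void configure_result_precision(void)
    {
        /* The shift value is illustrative; it trades range against resolution. */
        aialgo_set_model_result_precision_q31(&model_q31, 24);
    }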
void aialgo_distribute_parameter_memory(aimodel_t *model, void *memory_ptr, uint32_t memory_size)
Assign the memory for the trainable parameters (like weights, bias, ...) of the model.
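A hedged sketch of the usual pattern for assigning parameter memory: query the size with aialgo_sizeof_parameter_memory() (listed further below) and pass a buffer of that size to this function. malloc is used here for brevity; on an embedded target a static buffer is the more common choice.

    #include <stdint.h>
    #include <stdlib.h>
    #include "aifes.h"

    /* Assumed: a model whose structure has already been initialized with
     * aialgo_compile_model(). */
    extern aimodel_t model;

    uint8_t *allocate_parameter_memory(void)
    {
        uint32_t parameter_memory_size = aialgo_sizeof_parameter_memory(&model);
        uint8_t *parameter_memory = (uint8_t *) malloc(parameter_memory_size);
        if (parameter_memory == NULL) {
            return NULL; /* out of memory */
        }

        /* Distribute the buffer to the weights and biases of all layers. */
        aialgo_distribute_parameter_memory(&model, parameter_memory, parameter_memory_size);
        return parameter_memory;
    }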
void aialgo_print_model_structure(aimodel_t *model)
Print the layer structure of the model with the configured parameters.
uint8_t aialgo_compile_model(aimodel_t *model)
Initialize the model structure.
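A hedged sketch of building and compiling a tiny F32 model, which also shows aialgo_print_model_structure() from the entry above. The layer types and constructors used here (ailayer_input_f32_t / ailayer_input_f32_default(), ailayer_dense_f32_t / ailayer_dense_f32_default(), ailayer_sigmoid_f32_t / ailayer_sigmoid_f32_default()) and the field names input_dim, input_shape and neurons are not part of this listing; they are assumed from the AIfES default layer implementations.

    #include <stdint.h>
    #include "aifes.h"

    /* Layer configurations (assumed field names: input_dim, input_shape, neurons). */
    uint16_t input_shape[] = {1, 2};
    ailayer_input_f32_t   input_layer;
    ailayer_dense_f32_t   dense_layer;
    ailayer_sigmoid_f32_t sigmoid_layer;

    aimodel_t model;

    void build_model(void)
    {
        input_layer.input_dim   = 2;
        input_layer.input_shape = input_shape;
        dense_layer.neurons     = 3;

        /* Chain the layers: input -> dense -> sigmoid. */
        model.input_layer  = ailayer_input_f32_default(&input_layer);
        ailayer_t *x       = ailayer_dense_f32_default(&dense_layer, model.input_layer);
        model.output_layer = ailayer_sigmoid_f32_default(&sigmoid_layer, x);

        /* Initialize the internal model structure ... */
        aialgo_compile_model(&model);

        /* ... and print the configured layer structure for inspection. */
        aialgo_print_model_structure(&model);
    }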
void aialgo_set_model_gradient_precision_q31(aimodel_t *model, uint16_t shift)
Initialize the quantization parameters of the gradients for the Q31 data type.
void aialgo_set_model_delta_precision_q31(aimodel_t *model, uint16_t shift)
Initialize the quantization parameters of the layer deltas for the Q31 data type.
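These two calls, together with aialgo_set_model_result_precision_q31() above, set the fixed-point precision used for Q31 training. A brief sketch, assuming a compiled Q31 model that is being prepared for training; the shift values are illustrative.

    #include "aifes.h"

    extern aimodel_t model_q31; /* assumed: a compiled Q31 model */

    void configure_q31_training_precision(void)
    {
        /* Illustrative shift values; they trade range against resolution. */
        aialgo_set_model_gradient_precision_q31(&model_q31, 25);
        aialgo_set_model_delta_precision_q31(&model_q31, 25);
    }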
uint8_t aialgo_schedule_inference_memory(aimodel_t *model, void *memory_ptr, uint32_t memory_size)
Assign the memory for intermediate results of an inference to the model.
uint32_t aialgo_sizeof_inference_memory(aimodel_t *model)
Calculate the memory requirements for intermediate results of an inference.
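A hedged sketch of the usual two-step pattern: query the size with aialgo_sizeof_inference_memory() and hand a buffer of that size to aialgo_schedule_inference_memory(). malloc is used for brevity; a static buffer works as well.

    #include <stdint.h>
    #include <stdlib.h>
    #include "aifes.h"

    /* Assumed: a compiled model whose parameters already have memory assigned. */
    extern aimodel_t model;

    uint8_t *allocate_inference_memory(void)
    {
        uint32_t inference_memory_size = aialgo_sizeof_inference_memory(&model);
        uint8_t *inference_memory = (uint8_t *) malloc(inference_memory_size);
        if (inference_memory == NULL) {
            return NULL; /* out of memory */
        }

        /* Schedule the buffer for the intermediate results of the layers. */
        aialgo_schedule_inference_memory(&model, inference_memory, inference_memory_size);
        return inference_memory;
    }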
aitensor_t * aialgo_inference_model(aimodel_t *model, aitensor_t *input_data, aitensor_t *output_data)
Perform an inference on the model (run the model).
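A hedged sketch of running an inference, assuming a fully prepared F32 model (compiled, parameter and inference memory assigned) and the aitensor_t fields and aif32 constant mentioned above; the input and output shapes are illustrative and must match the model.

    #include <stdint.h>
    #include "aifes.h"

    extern aimodel_t model; /* assumed: fully prepared for inference */

    void run_inference(void)
    {
        /* One input sample with two features (illustrative shape). */
        float input_data[1 * 2] = {1.0f, 0.0f};
        uint16_t input_shape[2] = {1, 2};
        aitensor_t input_tensor;
        input_tensor.dtype = aif32;
        input_tensor.dim   = 2;
        input_tensor.shape = input_shape;
        input_tensor.data  = input_data;

        /* Output tensor the result is written into (one sample, one value). */
        float output_data[1 * 1];
        uint16_t output_shape[2] = {1, 1};
        aitensor_t output_tensor;
        output_tensor.dtype = aif32;
        output_tensor.dim   = 2;
        output_tensor.shape = output_shape;
        output_tensor.data  = output_data;

        aialgo_inference_model(&model, &input_tensor, &output_tensor);

        /* output_data[0] now holds the model prediction. */
    }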
aitensor_t * aialgo_forward_model(aimodel_t *model, aitensor_t *input_data)
Perform a forward pass on the model.
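Judging by the signature, this variant returns a pointer to the model's result tensor rather than filling a caller-provided output tensor. A brief hedged sketch, assuming a fully prepared F32 model and an input tensor set up as in the previous sketch.

    #include "aifes.h"

    extern aimodel_t model;         /* assumed: fully prepared for inference */
    extern aitensor_t input_tensor; /* assumed: prepared as in the previous sketch */

    void run_forward_pass(void)
    {
        /* The returned tensor lives in the scheduled inference memory;
         * read it before the next forward pass reuses that memory. */
        aitensor_t *result = aialgo_forward_model(&model, &input_tensor);
        float first_value = ((float *) result->data)[0];
        (void) first_value;
    }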
uint32_t aialgo_sizeof_parameter_memory(aimodel_t *model)
Calculate the memory requirements for the trainable parameters (like weights, bias, ...) of the model.
Related definitions: aimodel_t is the AIfES artificial neural network model (defined in aifes_core.h), and aitensor_t is a tensor in AIfES (defined in aifes_math.h). The module builds on the default implementation of the Dense layer and on the basic data-type independent math operations.