Board
ESP32 Dev Module
Device Description
Huzzah ESP32
Hardware Configuration
No other hardware connected
Version
latest master (checked out manually)
IDE Name
Arduino IDE
Operating System
Windows 10
Flash frequency
80MHz
PSRAM enabled
yes
Upload speed
921600
Description:
I got an error related to kMaxInputNum in the concatenation operation. I tried changing the kMaxInputNum value in concatenation.cc in the library to 19, but the error is still raised. Is it possible to change the kMaxInputNum value, or is there another solution to this error?
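For reference, the check that fails is the input-count guard in the concatenation kernel's Prepare step. Below is a rough sketch of that part of concatenation.cc; the value 10 for kMaxInputNum is my assumption based on upstream tflite-micro and may differ in the esp-tflite-micro copy that actually gets compiled:
// Rough sketch of the guard in tensorflow/lite/micro/kernels/concatenation.cc
// (kMaxInputNum = 10 is assumed from upstream tflite-micro, not verified here).
constexpr int kMaxInputNum = 10;  // Maximum number of input tensors per CONCATENATION node

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const int num_inputs = NumInputs(node);
  // This is the assertion reported in the debug message below when a
  // CONCATENATION node has more inputs than kMaxInputNum (19 > 10 here).
  TF_LITE_ENSURE(context, num_inputs <= kMaxInputNum);
  // ... remainder of Prepare omitted ...
  return kTfLiteOk;
}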
This is a visualization (from Netron) of the TFLite model I used, which concatenates 19 input values:
Sketch:
#include <_kiss_fft_guts.h>
#include
// #include "tflite_model_euc_float32_quant_parquet2.h"
#include "model_norm_euc_parquet.h"
// #include
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
// Create an area to store the model data
constexpr int tensorArenaSize = 16 * 1024; // Adjust size as needed
byte tensorArena[tensorArenaSize];
float input_data[19] = {15.7785, 294.6944, 150.8488, 58.0983, 67.3988, 72.9007, 30.7708, 31.4791, 16.6024, 10.7993, 4.1336, 5.9493, 6.0067, 7.3495, 5.0309, 4.456, 3.3453, 9.9208, 9.6588};
void setup() {
Serial.begin(9600);
// static tflite::MicroErrorReporter micro_error_reporter; // Error reporter
static tflite::MicroMutableOpResolver<7> resolver; // Modify this number based on the number of operations you need to add
// Add operations to the resolver
resolver.AddSplitV();
resolver.AddNotEqual();
resolver.AddZerosLike();
resolver.AddSelectV2();
resolver.AddConcatenation();
resolver.AddFullyConnected();
resolver.AddRelu();
const tflite::Model* model = tflite::GetModel(tflite_model);
if (model->version() != TFLITE_SCHEMA_VERSION) {
Serial.println("Model schema version mismatch!");
return;
}
// Build the interpreter and allocate tensors from the arena
static tflite::MicroInterpreter interpreter(model, resolver, tensorArena, tensorArenaSize, nullptr);
TfLiteStatus allocate_status = interpreter.AllocateTensors();
if (allocate_status != kTfLiteOk) {
Serial.println("Tensor allocation failed");
return;
}
Serial.println("line 46");
// Get pointer to input tensor
TfLiteTensor* input_tensor = interpreter.input(0);
// Copy data to input tensor
for (size_t i = 0; i < 19; i++) {
input_tensor->data.f[i] = input_data[i];
}
Serial.println("line 55");
// Run inference
TfLiteStatus invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
Serial.println("Inference failed");
return;
}
Serial.println("line 64");
// Get the output from the model
TfLiteTensor* output_tensor = interpreter.output(0);
// Assuming output is a single float value
float output = output_tensor->data.f[0];
Serial.print("Model output: ");
Serial.println(output);
Serial.println("line 74--done");
}
void loop() {
// Empty loop
}
Debug message:
./managed_components/espressif__esp-tflite-micro/tensorflow/lite/micro/kernels/concatenation.cc:142 num_inputs <= kMaxInputNum was not true.
Node CONCATENATION (number 58f) failed to prepare with status 1
Tensor allocation failed