Open vishalvky007 opened 2 weeks ago
👋 Hello @vishalvky007, thank you for your interest in Ultralytics YOLOv8 🚀! We recommend a visit to the Docs for new users where you can find many Python and CLI usage examples and where many of the most common questions may already be answered.
If this is a 🐛 Bug Report, please provide a minimum reproducible example to help us debug it.
If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our Tips for Best Training Results.
Join the vibrant Ultralytics Discord 🎧 community for real-time conversations and collaborations. This platform offers a perfect space to inquire, showcase your work, and connect with fellow Ultralytics users.
Pip install the `ultralytics` package, including all requirements, in a Python>=3.8 environment with PyTorch>=1.8:
pip install ultralytics
YOLOv8 may be run in any of the following up-to-date verified environments (with all dependencies including CUDA/CUDNN, Python and PyTorch preinstalled):
If this badge is green, all Ultralytics CI tests are currently passing. CI tests verify correct operation of all YOLOv8 Modes and Tasks on macOS, Windows, and Ubuntu every 24 hours and on every commit.
Hello! It seems like there's a mismatch between the expected output tensor shape from your TensorFlow Lite model and the shape defined in your Java code for handling the output.
Make sure the shape you're specifying in Java matches the model's final output. Based on the error, your model returns a tensor of shape [1, 8, 8400], so your Java object should reflect this shape to avoid shape incompatibility issues.
Here's how you might adjust your Java side:
// Instead, your output shape here should likely correlate with the shape [1, 8, 8400]
float[][][] output = new float[1][8][8400];
Adjust the dimensions accordingly to match your specific model outputs! If the error persists or there are further details you can provide, don't hesitate to reach out. Happy coding! 😊
package org.tensorflow.lite.examples.detection.tflite;
import android.content.res.AssetFileDescriptor; import android.content.res.AssetManager; import android.graphics.Bitmap; import android.graphics.RectF; import android.os.Trace; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Vector; import org.tensorflow.lite.Interpreter; import org.tensorflow.lite.examples.detection.env.Logger;
public class TFLiteObjectDetectionAPIModel implements Classifier { private static final Logger LOGGER = new Logger();
// Only return this many results.
private static final int NUM_DETECTIONS = 10;
// Float model
private static final float IMAGE_MEAN = 128.0f;
private static final float IMAGE_STD = 128.0f;
// Number of threads in the java app
private static final int NUM_THREADS = 4;
private boolean isModelQuantized;
// Config values.
private int inputSize;
// Pre-allocated buffers.
private final Vector
private ByteBuffer imgData;
private Interpreter tfLite;
private TFLiteObjectDetectionAPIModel() {}
/* Memory-map the model file in Assets. / private static MappedByteBuffer loadModelFile(AssetManager assets, String modelFilename) throws IOException { AssetFileDescriptor fileDescriptor = assets.openFd(modelFilename); FileInputStream inputStream = new FileInputStream(fileDescriptor.getFileDescriptor()); FileChannel fileChannel = inputStream.getChannel(); long startOffset = fileDescriptor.getStartOffset(); long declaredLength = fileDescriptor.getDeclaredLength(); return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength); }
/**
@param isQuantized Boolean representing model is quantized or not */ public static Classifier create( final AssetManager assetManager, final String modelFilename, final String labelFilename, final int inputSize, final boolean isQuantized) throws IOException { final TFLiteObjectDetectionAPIModel d = new TFLiteObjectDetectionAPIModel();
InputStream labelsInput = null; String actualFilename = labelFilename.split("file:////android_asset/")[1]; labelsInput = assetManager.open(actualFilename); BufferedReader br = null; br = new BufferedReader(new InputStreamReader(labelsInput)); String line; while ((line = br.readLine()) != null) { LOGGER.w(line); d.labels.add(line); } br.close();
d.inputSize = 2100;
try { d.tfLite = new Interpreter(loadModelFile(assetManager, modelFilename)); } catch (Exception e) { throw new RuntimeException(e); }
d.isModelQuantized = isQuantized; // Pre-allocate buffers. int numBytesPerChannel; if (isQuantized) { numBytesPerChannel = 1; // Quantized } else { numBytesPerChannel = 4; // Floating point } d.imgData = ByteBuffer.allocateDirect(1 inputSize inputSize 3 numBytesPerChannel); d.imgData.order(ByteOrder.nativeOrder()); d.intValues = new int[d.inputSize * d.inputSize];
d.tfLite.setNumThreads(NUM_THREADS); d.outputLocations = new float[1][NUM_DETECTIONS][4]; d.outputClasses = new float[1][NUM_DETECTIONS]; d.outputScores = new float[1][NUM_DETECTIONS]; d.numDetections = new float[1]; return d; }
@Override
public List
Trace.beginSection("preprocessBitmap"); // Preprocess the image data from 0-255 int to normalized float based // on the provided parameters. bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
imgData.rewind(); for (int i = 0; i < inputSize; ++i) { for (int j = 0; j < inputSize; ++j) { int pixelValue = intValues[i * inputSize + j]; if (isModelQuantized) { // Quantized model imgData.put((byte) ((pixelValue >> 16) & 0xFF)); imgData.put((byte) ((pixelValue >> 8) & 0xFF)); imgData.put((byte) (pixelValue & 0xFF)); } else { // Float model imgData.putFloat((((pixelValue >> 16) & 0xFF) - IMAGE_MEAN) / IMAGE_STD); imgData.putFloat((((pixelValue >> 8) & 0xFF) - IMAGE_MEAN) / IMAGE_STD); imgData.putFloat(((pixelValue & 0xFF) - IMAGE_MEAN) / IMAGE_STD); } } } Trace.endSection(); // preprocessBitmap
// Copy the input data into TensorFlow. Trace.beginSection("feed"); outputLocations = new float[1][NUM_DETECTIONS][4]; outputClasses = new float[1][NUM_DETECTIONS]; outputScores = new float[1][NUM_DETECTIONS]; numDetections = new float[1];
Object[] inputArray = {imgData}; Map<Integer, Object> outputMap = new HashMap<>();
outputMap.put(0, outputLocations); outputMap.put(1, outputClasses); outputMap.put(2, outputScores); outputMap.put(3, numDetections); Trace.endSection();
// Run the inference call. Trace.beginSection("run"); tfLite.runForMultipleInputsOutputs(inputArray, outputMap); Trace.endSection();
// Show the best detections.
// after scaling them back to the input size.
final ArrayList
@Override public void enableStatLogging(final boolean logStats) {}
@Override public String getStatString() { return ""; }
@Override public void close() {}
public void setNumThreads(int num_threads) { if (tfLite != null) tfLite.setNumThreads(num_threads); }
@Override public void setUseNNAPI(boolean isChecked) { if (tfLite != null) tfLite.setUseNNAPI(isChecked); } }
I can't find where this Java output object is supposed to be declared — where in the code should I declare it?
Search before asking
Question
Getting this error java.lang.IllegalArgumentException: Cannot copy from a TensorFlowLite tensor (Identity) with shape [1, 8, 8400] to a Java object with shape [1, 10, 4].
Additional
No response