abdelaziz-mahdy / pytorch_lite

Flutter package to help run PyTorch Lite models for classification, YOLOv5, and YOLOv8.
MIT License

The code works great when the phone is in portrait, but when the phone is placed in landscape, it does not work #33

Closed maheralzoubi97 closed 1 month ago

maheralzoubi97 commented 1 year ago

Here is my code:

```dart
import 'package:camera/camera.dart';
import 'package:flutter/material.dart';
import 'package:flutter/services.dart';
import 'package:pytorch_lite/pigeon.dart';
import 'package:pytorch_lite/pytorch_lite.dart';

import 'camera_view_singleton.dart';

/// [CameraView] sends each frame for inference
class CameraView extends StatefulWidget {
  /// Callback to pass results after inference to [HomeView]
  final Function(List<ResultObjectDetection?> recognitions) resultsCallback;
  final Function(String classification) resultsCallbackClassification;

  /// Constructor
  const CameraView(this.resultsCallback, this.resultsCallbackClassification,
      {Key? key})
      : super(key: key);

  @override
  _CameraViewState createState() => _CameraViewState();
}

class _CameraViewState extends State<CameraView> with WidgetsBindingObserver {
  /// List of available cameras
  late List<CameraDescription> cameras;

  /// Controller
  CameraController? cameraController;

  /// true when inference is ongoing
  bool predicting = false;

  ModelObjectDetection? _objectModel;
  ClassificationModel? _imageModel;

  bool classification = false;

  @override
  void initState() {
    super.initState();
    initStateAsync();
  }

  /// Load your model
  Future<void> loadModel() async {
    String pathObjectDetectionModel = "assets/models/best2.torchscript";
    try {
      _objectModel = await PytorchLite.loadObjectDetectionModel(
        pathObjectDetectionModel,
        24,
        640,
        640,
        labelPath: "assets/labels/labels_objectDetection_Coco.txt",
        objectDetectionModelType: ObjectDetectionModelType.yolov8,
      );
    } catch (e) {
      if (e is PlatformException) {
        print("only supported for android, Error is $e");
      } else {
        print("Error is $e");
      }
    }
  }

  void initStateAsync() async {
    WidgetsBinding.instance.addObserver(this);
    await loadModel();

    // Camera initialization
    initializeCamera();

    // Initially predicting = false
    // predicting = false;
  }

  /// Initializes the camera by setting [cameraController]
  void initializeCamera() async {
    cameras = await availableCameras();

    // cameras[0] for rear-camera
    cameraController = CameraController(cameras[0], ResolutionPreset.veryHigh,
        enableAudio: false);

    cameraController?.initialize().then((_) async {
      // Stream of image passed to [onLatestImageAvailable] callback
      await cameraController?.startImageStream(onLatestImageAvailable);

      /// previewSize is size of each image frame captured by controller
      ///
      /// 352x288 on iOS, 240p (320x240) on Android with ResolutionPreset.low
      Size? previewSize = cameraController?.value.previewSize;

      /// previewSize is size of raw input image to the model
      CameraViewSingleton.inputImageSize = previewSize!;

      // the display width of image on screen is
      // same as screenWidth while maintaining the aspectRatio
      Size screenSize = MediaQuery.of(context).size;
      CameraViewSingleton.screenSize = screenSize;
      CameraViewSingleton.ratio = screenSize.width / previewSize.height;
    });
  }

  @override
  Widget build(BuildContext context) {
    // Return empty container while the camera is not initialized
    if (cameraController == null || !cameraController!.value.isInitialized) {
      return Container();
    }

    // return CameraPreview(cameraController!);
    return cameraController!.buildPreview();

    // return AspectRatio(
    //     // aspectRatio: cameraController.value.aspectRatio,
    //     child: CameraPreview(cameraController));
  }

  Future<void> runObjectDetection(CameraImage cameraImage) async {
    if (_objectModel != null) {
      List<ResultObjectDetection?> objDetect =
          await _objectModel!.getImagePredictionFromBytesList(
              cameraImage.planes.map((e) => e.bytes).toList(),
              cameraImage.width,
              cameraImage.height,
              minimumScore: 0.3,
              iOUThreshold: 0.3);

      print("data outputted $objDetect");
      widget.resultsCallback(objDetect);
    }
  }

  /// Callback to receive each frame [CameraImage], perform inference on it
  Future<void> onLatestImageAvailable(CameraImage cameraImage) async {
    if (predicting) {
      return;
    }
    predicting = true;

    var futures = <Future>[];
    futures.add(runObjectDetection(cameraImage));
    await Future.wait(futures);

    predicting = false;
  }

  @override
  void didChangeAppLifecycleState(AppLifecycleState state) async {
    switch (state) {
      case AppLifecycleState.paused:
        cameraController?.stopImageStream();
        break;
      case AppLifecycleState.resumed:
        if (!cameraController!.value.isStreamingImages) {
          await cameraController?.startImageStream(onLatestImageAvailable);
        }
        break;
      default:
    }
  }

  @override
  void dispose() {
    WidgetsBinding.instance.removeObserver(this);
    cameraController?.dispose();
    super.dispose();
  }
}
```

Could you please help me?
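
For context on the code above: the frame is handed to `getImagePredictionFromBytesList` with only `cameraImage.width` and `cameraImage.height`, and no rotation information. On Android, `CameraImage` frames generally arrive in the camera sensor's orientation (usually landscape) no matter how the device is held. A minimal sketch of the usual rotation math, using the `camera` plugin's `sensorOrientation` field; the helper and its name are hypothetical, not part of pytorch_lite:

```dart
import 'package:camera/camera.dart';
import 'package:flutter/services.dart';

/// Hypothetical helper: degrees a sensor frame must be rotated clockwise
/// to appear upright for the given UI orientation. This mirrors the usual
/// camera-plugin math; it is a sketch, not pytorch_lite API.
int clockwiseRotation(CameraDescription camera, DeviceOrientation orientation) {
  // Map the UI orientation to degrees (the landscape mapping can differ
  // per platform; this is the common convention).
  const orientationDegrees = {
    DeviceOrientation.portraitUp: 0,
    DeviceOrientation.landscapeLeft: 90,
    DeviceOrientation.portraitDown: 180,
    DeviceOrientation.landscapeRight: 270,
  };
  final deviceDegrees = orientationDegrees[orientation] ?? 0;

  if (camera.lensDirection == CameraLensDirection.front) {
    // Front camera frames are mirrored, so the device rotation is added.
    return (camera.sensorOrientation + deviceDegrees) % 360;
  }
  // Back camera: subtract the device rotation from the sensor orientation.
  return (camera.sensorOrientation - deviceDegrees + 360) % 360;
}
```

When this rotation is 90 or 270 degrees, the width and height the model sees are effectively swapped relative to the preview, which matches the landscape behavior reported below.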

abdelaziz-mahdy commented 1 year ago

In landscape on Android or iOS? And what happens exactly?

Does it show wrong results, wrong box placements, or nothing at all?

Please provide all the information about what happens so I can help you better.

maheralzoubi97 commented 1 year ago

In landscape on Android.

It shows wrong results and wrong box placements.

This is in the case of real-time object detection. @zezo357
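
That symptom fits a 90-degree-rotated input: YOLO boxes come back normalized, so mapping them onto the screen with swapped axes puts every box in the wrong place. A sketch of the usual mapping, assuming `ResultObjectDetection.rect` holds normalized coordinates in `[0, 1]` as in the package's pigeon bindings:

```dart
import 'dart:ui';

import 'package:pytorch_lite/pigeon.dart';

/// Sketch: map a normalized detection box to screen pixels. If the frame
/// fed to the model was rotated 90 degrees relative to the preview,
/// left/top and width/height are effectively swapped here, so every box
/// lands in the wrong place.
Rect toScreenRect(ResultObjectDetection detection, Size screen) {
  return Rect.fromLTWH(
    detection.rect.left * screen.width,
    detection.rect.top * screen.height,
    detection.rect.width * screen.width,
    detection.rect.height * screen.height,
  );
}
```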

abdelaziz-mahdy commented 1 year ago

Can you provide an example? It will help me greatly, both the right and wrong ones.

Most probably I will work on it after I finish the OpenCV stuff, since I am thinking of moving the camera decoding logic to OpenCV to be much faster.
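
Until then, one possible stopgap (a sketch, not an official workaround from the package) is to lock the app to portrait so the camera frames and the preview always agree:

```dart
import 'package:flutter/services.dart';

/// Stopgap sketch: pin the UI to portrait until landscape frames are
/// handled, so the model input and the preview stay aligned.
Future<void> lockToPortrait() {
  return SystemChrome.setPreferredOrientations(
    [DeviceOrientation.portraitUp],
  );
}
```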

maheralzoubi97 commented 1 year ago

See these screenshots: Screenshot_2023-08-07-11-34-19-10_0047048753e5e8c75bac1ea8904c47dd and Screenshot_2023-08-07-11-33-58-54_0047048753e5e8c75bac1ea8904c47dd

abdelaziz-mahdy commented 1 year ago

Thank you

abdelaziz-mahdy commented 1 month ago

Fixed by https://github.com/abdelaziz-mahdy/pytorch_lite/pull/81 and released.