hunglc007 / tensorflow-yolov4-tflite

YOLOv4, YOLOv4-tiny, YOLOv3, YOLOv3-tiny implemented in TensorFlow 2.0 and Android. Convert YOLO v4 .weights to TensorFlow, TensorRT and TFLite
https://github.com/hunglc007/tensorflow-yolov4-tflite
MIT License

How to load more than 1 model with tensorflow2 object detection api in flask with Blueprint? #331

Open LeamonLee opened 3 years ago

LeamonLee commented 3 years ago

I would like to integrate my trained object detection models, built with the TensorFlow 2 object detection API, with Flask and its Blueprint functionality. The thing is, if I load only one model, it works perfectly fine, but if I load two or more models, every model except the last one throws an error while predicting.

Here's the error message:

Error while reading resource variable batch_normalization_91/gamma_127480 from Container: localhost. This could mean that the variable was uninitialized. Not found: Resource localhost/batch_normalization_91/gamma_127480/class tensorflow::Var does not exist. [[{{node StatefulPartitionedCall/functional_1/batch_normalization_91/ReadVariableOp}}]] [Op:__inference_signature_wrapper_75712] Function call stack: signature_wrapper

Package Version:

OS: Windows 10

I googled this error myself, and it seems to have to do with the graph or the session, which are concepts from TensorFlow 1, so I have no idea how to deal with this in TensorFlow 2. If you need more info, please let me know. Thanks.
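To make the loading part easier to reason about, here is a stripped-down, standalone sketch of what the loading loop in my code boils down to (the paths and names below are just placeholders, and I'm not sure whether calling clear_session before each load is right when loading several models):

import tensorflow as tf
from tensorflow.python.saved_model import tag_constants

# Placeholder paths, just for illustration
model_paths = {
    "model_a": "checkpoints/yolov4-custom-a-416",
    "model_b": "checkpoints/yolov4-custom-b-416",
}

signatures = {}
for name, path in model_paths.items():
    tf.keras.backend.clear_session()
    loaded = tf.saved_model.load(path, tags=[tag_constants.SERVING])
    # Only the serving signature is kept; the loaded object itself goes out of scope
    signatures[name] = loaded.signatures['serving_default']

# In my Flask app, only the last-loaded signature predicts successfully;
# the earlier ones raise the "Error while reading resource variable ..." error above.
dummy_input = tf.zeros((1, 416, 416, 3), dtype=tf.float32)
for name, infer in signatures.items():
    print(name, infer(dummy_input).keys())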

Here's the code snippet:

from flask import request, Blueprint, jsonify, Response
import os, base64
import cv2
import numpy as np

import tensorflow as tf
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants

objdetect = Blueprint('objdetect', __name__, url_prefix='/od')
dctModelConfig = {
  "lpr": {
    "namesPath": "./data/classes/lpr_custom.names",
    "modelPath": "checkpoints/yolov4-custom-lpr-416"
  },
  "facemask": {
    "namesPath": "./data/classes/facemask_custom.names",
    "modelPath": "checkpoints/yolov4-custom-facemask-416"
  },
  "coronavirus": {
    "namesPath": "./data/classes/coronavirus_custom.names",
    "modelPath": "checkpoints/yolov4-custom-coronavirus-416"
  }
}
dctInfer = {}
INPUT_IMAGE_SIZE = 416
IOU = 0.45
SCORE = 0.25
VIDEO_OUTPUT_FORMAT = 'MP4V'

class ODDetector:

    def __init__(self, weightsPath):
        # Clear the global Keras state, then load the exported SavedModel
        # and keep its serving signature for inference
        tf.keras.backend.clear_session()
        saved_model_loaded = tf.saved_model.load(weightsPath, tags=[tag_constants.SERVING])
        self.model = saved_model_loaded.signatures['serving_default']

    def predict(self, batch_data):
        pred_bbox = self.model(batch_data)    # Fails here for every model except the last one loaded
        return pred_bbox

# `app` is the Flask application instance, imported from the main application module
for k, v in dctModelConfig.items():
    weightsPath = os.path.join(app.root_path, v["modelPath"])
    dctInfer[k] = ODDetector(weightsPath)

@objdetect.route('/image_detect/<classesName>', methods=['POST'])
def image_detect(classesName):

    if classesName not in dctInfer.keys():
        return jsonify({"response": "classesName is not allowed"}), 400

    image = request.files["images"]
    imageFileName = image.filename
    saveImagePath = os.path.join(app.config["IMAGE_UPLOADS"], imageFileName)
    image.save(saveImagePath)
    print("saveImagePath: ", saveImagePath)
    if imageFileName != "":
        original_image = cv2.imread(saveImagePath)
        original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)

        image_data = cv2.resize(original_image, (INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE))
        image_data = image_data / 255.
        image_data = image_data[np.newaxis, ...].astype(np.float32)
        batch_data = tf.constant(image_data)

        pred_bbox = dctInfer[classesName].predict(batch_data)
        # pred_bbox = dctInfer[classesName].model(batch_data)
        print("pred_bbox: ", pred_bbox)
        for key, value in pred_bbox.items():
            boxes = value[:, :, 0:4]
            pred_conf = value[:, :, 4:]

        boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
            boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
            scores=tf.reshape(
                pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
            max_output_size_per_class=50,
            max_total_size=50,
            iou_threshold=IOU,
            score_threshold=SCORE
        )

        pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
        detectedImage = utils.draw_bbox_by_classes(original_image, pred_bbox, classesName=classesName)
        detectedImage = cv2.cvtColor(detectedImage, cv2.COLOR_RGB2BGR)   # back to BGR for cv2.imwrite
        fileNameBase, fileNameExt = os.path.splitext(imageFileName)
        detectedImageFileName = fileNameBase + '_detected' + fileNameExt
        detectedImagePath = os.path.join(app.config["IMAGE_UPLOADS"], detectedImageFileName)
        print("detectedImagePath: ", detectedImagePath)
        cv2.imwrite(detectedImagePath, detectedImage)

        try:
          with open(detectedImagePath, "rb") as image_file:
            encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
          print("encoded_string: ", encoded_string)
          img_url = f'data:image/jpg;base64,{encoded_string}'
          return img_url
        except Exception as e:
          print(e)
          return jsonify({"response": str(e)}), 500

    else:
        return jsonify({"response": "FileNotFoundError"}), 400