PINTO0309 / openvino2tensorflow

This script converts ONNX/OpenVINO IR models to TensorFlow's saved_model, tflite, h5, tfjs, tftrt (TensorRT), CoreML, EdgeTPU, ONNX, and pb formats. Pipeline: PyTorch (NCHW) -> ONNX (NCHW) -> OpenVINO (NCHW) -> openvino2tensorflow -> TensorFlow/Keras (NHWC/NCHW) -> TFLite (NHWC/NCHW). It also converts between .pb, saved_model, .tflite, and ONNX. Supports building environments with Docker, including direct access to the host PC's GUI and camera to verify operation. NVIDIA GPU (dGPU) and Intel iHD GPU (iGPU) are supported.
MIT License

Output different by onnx->openvino->onnx #85

Closed: jnulzl closed this issue 2 years ago

jnulzl commented 2 years ago

Issue Type

Support

OS

Ubuntu

OS architecture

x86_64

Programming Language

Python

Framework

OpenVINO, ONNX

Download URL for ONNX / OpenVINO IR

The original ONNX model is jnulzl_onnx.zip

Description

I want to merge the BN layers into the Conv layers of an ONNX model using OpenVINO and openvino2tensorflow. However, the output of the ONNX model produced by openvino2tensorflow differs from the output of the original ONNX model. I have verified that the original ONNX model's output matches the OpenVINO XML/BIN model's output; the mismatch appears only after conversion with openvino2tensorflow.
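For reference, a minimal NumPy sketch of the Conv+BN folding identity involved here (assuming standard Conv+BN semantics; this is not the actual openvino2tensorflow code). Folding is exact in real arithmetic but only matches to float32 rounding, which is worth keeping in mind when comparing outputs:

```python
# Sketch of Conv+BN folding, with a 1x1 convolution written as a
# matrix multiply for brevity. Not the openvino2tensorflow code.
import numpy as np

rng = np.random.default_rng(0)
cin, cout = 3, 8
x = rng.standard_normal((cin, 32 * 32)).astype(np.float32)  # flattened HxW
w = rng.standard_normal((cout, cin)).astype(np.float32)     # 1x1 conv weights
b = rng.standard_normal((cout, 1)).astype(np.float32)       # conv bias
gamma = rng.standard_normal((cout, 1)).astype(np.float32)   # BN scale
beta = rng.standard_normal((cout, 1)).astype(np.float32)    # BN shift
mean = rng.standard_normal((cout, 1)).astype(np.float32)    # BN running mean
var = (rng.random((cout, 1)) + 0.5).astype(np.float32)      # BN running var
eps = np.float32(1e-5)

# Conv followed by BN
y_ref = gamma * (w @ x + b - mean) / np.sqrt(var + eps) + beta

# BN folded into the conv weights and bias
scale = gamma / np.sqrt(var + eps)
y_folded = (w * scale) @ x + (b - mean) * scale + beta

print(np.abs(y_ref - y_folded).max())            # ~1e-6: float32 rounding
print(np.allclose(y_ref, y_folded, atol=1e-5))   # True
```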

Software versions:

- openvino_2021.4.582
- tensorflow 2.4.2
- onnx 1.10.2
- openvino2tensorflow 1.26.0

Relevant Log Output

This is the error.

Source code for simple inference testing code

ONNX demo:

```python
import cv2
import os
import sys
import numpy as np
import onnx
import onnxruntime as ort

output_path = "model_float32.onnx"
pose_model = ort.InferenceSession(output_path)

input_data = cv2.imread("jnulzl.jpg")
input_data = cv2.resize(input_data, (320, 320))
input_data = input_data.reshape(1, 320, 320, 3)#.transpose(0, 3, 1, 2)
input_data = input_data.astype(np.float32)

# input_name = 'input'
# output_name = ['output1', 'output2', 'output3']
input_name = 'inputs'
output_name = ['Identity_2', 'Identity_1', 'Identity']
output = pose_model.run(output_name, {input_name: input_data})
```
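As an aside, a small onnxruntime sketch to look up the model's actual input/output names instead of hard-coding them; openvino2tensorflow renames tensors, so the converted model's names differ from the original's:

```python
import onnxruntime as ort

# List the converted model's real I/O names rather than guessing.
sess = ort.InferenceSession("model_float32.onnx")
print([i.name for i in sess.get_inputs()])    # e.g. ['inputs']
print([o.name for o in sess.get_outputs()])   # e.g. ['Identity_2', 'Identity_1', 'Identity']
```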

OpenVINO demo:

```python
import numpy as np
import os
import cv2
from openvino.inference_engine import IENetwork, IECore

def load_model(img_path, xml_path, device):

    print("Loading Inference Engine")
    ie = IECore()
    print("Device info:")
    versions = ie.get_versions(device)
    print("{}{}".format(" "*8, device))
    print("{}MKLDNNPlugin version ......... {}.{}".format(" "*8, versions[device].major, versions[device].minor))
    print("{}Build ........... {}".format(" "*8, versions[device].build_number))

    name = os.path.splitext(xml_path)[0]
    bin_path = name + '.bin'
    print("Pose Detection model - Reading network files:\n\t{}\n\t{}".format(xml_path, bin_path))
    pd_net = ie.read_network(model=xml_path, weights=bin_path)
    # Input blob: input:0 - shape: [1, 3, 256, 256] (lightning)
    # Output blob: Identity - shape: [1, 6, 56]
    pd_input_blob = next(iter(pd_net.input_info))
    print(f"Input blob: {pd_input_blob} - shape: {pd_net.input_info[pd_input_blob].input_data.shape}")
    _,_,pd_h,pd_w = pd_net.input_info[pd_input_blob].input_data.shape
    for o in pd_net.outputs.keys():
        print(f"Output blob: {o} - shape: {pd_net.outputs[o].shape}")
    pd_kps = "Identity"
    print("Loading pose detection model into the plugin")
    pd_exec_net = ie.load_network(network=pd_net, num_requests=1, device_name=device)

    input_data = cv2.imread(img_path)
    input_data = cv2.resize(input_data, (320, 320))
    input_data = input_data.reshape(1, 320, 320, 3).transpose(0, 3, 1, 2)
    frame_nn = input_data.astype(np.float32)

    inference = pd_exec_net.infer(inputs={pd_input_blob: frame_nn})
    print(inference)
if __name__ == "__main__":
    img_path = "jnulzl.jpg"
    xml_path = "xxx.xml"
    device = "CPU"
    load_model(img_path, xml_path, device)
```
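Note the layout difference between the two demos above: the original ONNX model and the OpenVINO IR take NCHW, while a model converted by openvino2tensorflow takes NHWC unless `--keep_input_tensor_in_nchw` is passed. A one-line sketch of the relationship:

```python
import numpy as np

nhwc = np.zeros((1, 320, 320, 3), dtype=np.float32)  # converted model input (NHWC)
nchw = nhwc.transpose(0, 3, 1, 2)                    # original / OpenVINO input (NCHW)
assert nchw.shape == (1, 3, 320, 320)
```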
PINTO0309 commented 2 years ago

Even assuming you have read the README properly, I cannot tell what the problem is. https://github.com/PINTO0309/openvino2tensorflow#6-7-replace-weights-or-constant-values-in-const-op-and-add-transpose-or-reshape-or-cast-just-beforeafter-the-operation-specified-by-layer_id

```bash
H=320
W=320
MODEL=jnulzl

$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer/mo.py \
--input_model ${MODEL}.onnx \
--data_type FP32 \
--output_dir savedmodel${MODEL}_${H}x${W}/openvino/FP32

openvino2tensorflow \
--model_path savedmodel${MODEL}_${H}x${W}/openvino/FP32/${MODEL}.xml \
--output_saved_model \
--output_pb \
--output_no_quant_float32_tflite \
--weight_replacement_config replace.json

openvino2tensorflow \
--model_path savedmodel${MODEL}_${H}x${W}/openvino/FP32/${MODEL}.xml \
--output_saved_model \
--output_pb \
--output_onnx \
--onnx_opset 11 \
--weight_replacement_config replace.json \
--keep_input_tensor_in_nchw
```
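The contents of replace.json are not shown in this thread. Going by the README section linked above, a weight-replacement config has roughly the following shape; the layer_id, type, and values below are placeholders for illustration, not the ones used for this model:

```json
{
  "format_version": 2,
  "layers": [
    {
      "layer_id": "123",
      "type": "Transpose",
      "replace_mode": "insert_after",
      "values": [0, 2, 3, 1]
    }
  ]
}
```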

- test.py
```python
import numpy as np
import onnxruntime as ort
from pprint import pprint

input_data = np.ones((1,3,320,320), dtype=np.float32)

### original
output_path = "jnulzl.onnx"
model = ort.InferenceSession(output_path)
input_name = 'input'
output_names = ['output1', 'output2', 'output3']
output_original = model.run(output_names, {input_name: input_data})

### converted
output_path = "saved_model_jnulzl_320x320/model_float32.onnx"
model = ort.InferenceSession(output_path)
input_name = 'input'
output_names = ['tf.identity', 'tf.identity_1', 'tf.identity_2']
output_converted = model.run(output_names, {input_name: input_data})
pprint(output_converted)

print(f'output index 0 : output_original:{output_original[0].shape} output_converted:{output_converted[2].shape}')
print(f'output index 1 : output_original:{output_original[1].shape} output_converted:{output_converted[1].shape}')
print(f'output index 2 : output_original:{output_original[2].shape} output_converted:{output_converted[0].shape}')

print('output values 0')
pprint(output_original[0])
pprint(output_converted[2])

print('output values 1')
pprint(output_original[1])
pprint(output_converted[1])

print('output values 2')
pprint(output_original[2])
pprint(output_converted[0])

"""
output values 0
array([[[[3.01766127e-01, 5.23263276e-01, 5.30654371e-01,
          4.89401340e-01, 2.96235085e-05, 9.85341191e-01],
         [4.91656095e-01, 4.87452626e-01, 6.02180481e-01,
          5.19975662e-01, 1.36196613e-05, 9.85447288e-01],
         [5.66398382e-01, 4.81625199e-01, 6.00098670e-01,
          5.27195096e-01, 1.14142895e-05, 9.85086560e-01],

array([[[[3.01766157e-01, 5.23263276e-01, 5.30654371e-01,
          4.89401311e-01, 2.96831131e-05, 9.85341191e-01],
         [4.91655886e-01, 4.87452686e-01, 6.02180481e-01,
          5.19975662e-01, 1.36196613e-05, 9.85447288e-01],
         [5.66398263e-01, 4.81625229e-01, 6.00098670e-01,
          5.27195096e-01, 1.13844872e-05, 9.85086560e-01],

output values 1
array([[[[4.5055005e-01, 3.8157779e-01, 4.4188267e-01, 3.4043151e-01,
          3.0100346e-06, 9.8180491e-01],
         [5.7248467e-01, 4.3746626e-01, 4.7871464e-01, 3.5527712e-01,
          3.1292439e-06, 9.8247206e-01],
         [5.0388503e-01, 4.3514246e-01, 5.5713403e-01, 3.5947865e-01,
          2.3543835e-06, 9.8205173e-01],

array([[[[4.5055014e-01, 3.8157776e-01, 4.4188270e-01, 3.4043151e-01,
          3.0100346e-06, 9.8180491e-01],
         [5.7248461e-01, 4.3746620e-01, 4.7871462e-01, 3.5527712e-01,
          3.0696392e-06, 9.8247206e-01],
         [5.0388491e-01, 4.3514246e-01, 5.5713403e-01, 3.5947865e-01,
          2.3543835e-06, 9.8205173e-01],

output values 2
array([[[[6.07297122e-01, 7.17708766e-01, 3.46558690e-01,
          3.99648368e-01, 9.35792923e-06, 9.80870485e-01],
         [3.56925786e-01, 7.37659812e-01, 3.99764746e-01,
          3.85306805e-01, 2.56001949e-05, 9.81061101e-01],
         [3.93503606e-01, 6.97192192e-01, 4.71485436e-01,
          4.00504529e-01, 8.49366188e-06, 9.80911136e-01],

array([[[[6.07297122e-01, 7.17708826e-01, 3.46558690e-01,
          3.99648368e-01, 9.35792923e-06, 9.80870485e-01],
         [3.56925756e-01, 7.37659693e-01, 3.99764746e-01,
          3.85306776e-01, 2.56001949e-05, 9.81061101e-01],
         [3.93503726e-01, 6.97192192e-01, 4.71485466e-01,
          4.00504559e-01, 8.49366188e-06, 9.80911136e-01],
"""
PINTO0309 commented 2 years ago

Closed due to lack of progress.