onnx / tutorials

Tutorials for creating and using ONNX models

RetinaFace: after converting the model to ONNX and serving it, how do I post-process the output? #220

Open Vuong02011996 opened 4 years ago

Vuong02011996 commented 4 years ago

```python
import numpy as np
import requests
from PIL import Image
import onnx_ml_pb2   # generated from onnx-ml.proto (see the ONNX Runtime Server tutorial)
import predict_pb2   # generated from predict.proto (see the ONNX Runtime Server tutorial)

# Load the test image, resize, and convert to NCHW layout
img = Image.open("/home/vuong/Downloads/worlds-largest-selfie.jpg")
img = img.resize((640, 480), Image.BILINEAR)
img_data = np.array(img)
img_data = np.transpose(img_data, [2, 0, 1])
img_data = np.expand_dims(img_data, 0)

# Normalize each channel with ImageNet mean/std
mean_vec = np.array([0.485, 0.456, 0.406])
stddev_vec = np.array([0.229, 0.224, 0.225])
norm_img_data = np.zeros(img_data.shape).astype('float32')
for i in range(img_data.shape[1]):
    norm_img_data[:, i, :, :] = (img_data[:, i, :, :] / 255 - mean_vec[i]) / stddev_vec[i]

# Pack the normalized image into a TensorProto (data_type 1 == FLOAT)
input_tensor = onnx_ml_pb2.TensorProto()
input_tensor.dims.extend(norm_img_data.shape)
input_tensor.data_type = 1
input_tensor.raw_data = norm_img_data.tobytes()

request_message = predict_pb2.PredictRequest()
```

For your model, the input name will likely be different (it is whatever you set when exporting); use Netron to find the actual input name.
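If you prefer to check the names programmatically instead of opening Netron, a minimal sketch with the onnx Python package works too; the file path below is only a placeholder for wherever your exported RetinaFace model lives:

```python
import onnx

# Placeholder path; point this at your own exported model file.
model = onnx.load("retinaface.onnx")

# The graph inputs/outputs carry the names expected in the request/response maps.
print("inputs: ", [i.name for i in model.graph.input])
print("outputs:", [o.name for o in model.graph.output])
```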

request_message.inputs["input"].data_type = input_tensor.data_type
request_message.inputs["input"].dims.extend(input_tensor.dims)
request_message.inputs["input"].raw_data = input_tensor.raw_data

content_type_headers = ['application/x-protobuf', 'application/octet-stream', 'application/vnd.google.protobuf']

for h in content_type_headers:
    request_headers = {
        'Content-Type': h,
        'Accept': 'application/x-protobuf'
    }

PORT_NUMBER = 9001  # Change appropriately if needed based on any changes when invoking the server in the pre-requisites
inference_url = "http://127.0.0.1:" + str(PORT_NUMBER) + "/v1/models/default/versions/1:predict"
response = requests.post(inference_url, headers=request_headers, data=request_message.SerializeToString())

response_message = predict_pb2.PredictResponse()
response_message.ParseFromString(response.content)

# For your model, the outputs names should be something else customized by yourself. Use Netron to find out the outputs names.
output1 = np.frombuffer(response_message.outputs['output1'].raw_data, dtype=np.float32)
output2 = np.frombuffer(response_message.outputs['output2'].raw_data, dtype=np.float32)
output3 = np.frombuffer(response_message.outputs['output3'].raw_data, dtype=np.float32)
output4 = np.frombuffer(response_message.outputs['output4'].raw_data, dtype=np.float32)
output5 = np.frombuffer(response_message.outputs['output5'].raw_data, dtype=np.float32)
output6 = np.frombuffer(response_message.outputs['output6'].raw_data, dtype=np.float32)
output7 = np.frombuffer(response_message.outputs['output7'].raw_data, dtype=np.float32)
output8 = np.frombuffer(response_message.outputs['output8'].raw_data, dtype=np.float32)
output9 = np.frombuffer(response_message.outputs['output9'].raw_data, dtype=np.float32)
output10 = np.frombuffer(response_message.outputs['output10'].raw_data, dtype=np.float32)

print('output1 shape:', response_message.outputs['output1'].dims)
print('output2 shape:', response_message.outputs['output2'].dims)
print('output3 shape:', response_message.outputs['output3'].dims)
print('output4 shape:', response_message.outputs['output4'].dims)
print('output5 shape:', response_message.outputs['output5'].dims)
print('output6 shape:', response_message.outputs['output6'].dims)
print('output7 shape:', response_message.outputs['output7'].dims)
print('output8 shape:', response_message.outputs['output8'].dims)
print('output9 shape:', response_message.outputs['output9'].dims)
print('output10 shape:', response_message.outputs['output10'].dims)`

This prints:

```
output1 shape: [1, 720, 60, 80]
output2 shape: [1, 720, 30, 40]
output3 shape: [1, 720, 15, 20]
output4 shape: [1, 720, 8, 10]
output5 shape: [1, 720, 4, 5]
output6 shape: [1, 36, 60, 80]
output7 shape: [1, 36, 30, 40]
output8 shape: [1, 36, 15, 20]
output9 shape: [1, 36, 8, 10]
output10 shape: [1, 36, 4, 5]
```
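As a first post-processing step (a minimal sketch only, not the RetinaFace-specific decoding itself), the flat buffers returned by np.frombuffer can be restored to these shapes using the dims field from the response; the names below mirror the snippet above:

```python
import numpy as np

# Reshape every flat output buffer back to the shape reported by the server.
outputs = {}
for name in response_message.outputs:
    out = response_message.outputs[name]
    outputs[name] = np.frombuffer(out.raw_data, dtype=np.float32).reshape(list(out.dims))

print(outputs['output1'].shape)  # e.g. (1, 720, 60, 80)
```

The RetinaFace-specific decoding (anchor/prior generation, box and landmark regression, confidence thresholding, and NMS) would then be applied to these reshaped tensors, which is the part the question is asking about.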