Please refer to deploy/benchmark.py for the ONNX inference code. What hardware are you running on? If CPU, this result is reasonable.
Thank you. I forgot to use the CUDA execution provider, and I should have installed onnxruntime-gpu instead of onnxruntime.
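For anyone hitting the same problem, here is a minimal sketch of creating the session with the CUDA provider; "model.onnx" is only a placeholder path for the converted model.

import onnxruntime as ort

# Prefer CUDA and fall back to CPU if it is unavailable.
sess = ort.InferenceSession(
    "model.onnx",  # placeholder: path to the converted model
    providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
)

# If this prints only ['CPUExecutionProvider'], the CPU-only onnxruntime
# package is installed instead of onnxruntime-gpu.
print(sess.get_providers())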
Hi, could you please provide the ONNX model you have converted? Thanks very much!
I used your code to convert the model to ONNX and wrote the inference code myself. However, when I measured the time, it takes more than 200 ms per image. If possible, could you provide PyTorch or ONNX inference code? My code is as follows.
======================================
import os
os.environ['CUDA_MODULE_LOADING'] = 'LAZY'
import torchvision
import argparse
import numpy as np
from PIL import Image
import cv2
import onnxruntime as nxrun
import torch
import torchvision.transforms as T
import tqdm
import time

def parser_args():
    parser = argparse.ArgumentParser('Object detection using ONNX model')
    parser.add_argument('--path', type=str, required=True, help='ONNX model file path')
    parser.add_argument('--image_dir', type=str, required=True, help='Directory containing images to run inference on')
    parser.add_argument('--output_dir', type=str, required=True, help='Directory to save output images with detections')
    parser.add_argument('--threshold', type=float, default=0.5, help='Score threshold for displaying bounding boxes')
    parser.add_argument('--iou_threshold', type=float, default=0.5, help='IoU threshold for non-max suppression')
    parser.add_argument('--class_names', type=str, required=True, help='Path to class names file')
    return parser.parse_args()

def findClassNameYOLO(annotationPath):
    with open(annotationPath, 'r') as file:
        className = file.read().splitlines()
    return className

def load_image(file_path):
    return Image.open(file_path).convert("RGB")

def infer_transforms():
    normalize = T.Compose([
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    return T.Compose([
        T.Resize((640, 640)),
        normalize,
    ])

def box_cxcywh_to_xyxy(x):
    x_c, y_c, w, h = x.unbind(-1)
    b = [(x_c - 0.5 * w.clamp(min=0.0)), (y_c - 0.5 * h.clamp(min=0.0)),
         (x_c + 0.5 * w.clamp(min=0.0)), (y_c + 0.5 * h.clamp(min=0.0))]
    return torch.stack(b, dim=-1)

def soft_nms(boxes, scores, iou_threshold=0.5, sigma=0.5, score_threshold=0.001):
    N = boxes.shape[0]
    indexes = torch.arange(0, N, dtype=torch.float).view(N, 1)
    dets = torch.cat((boxes, scores.view(N, 1), indexes), dim=1)
    keep = []

def generateColors(numClass):
    colors = []
    golden_ratio_conjugate = 0.618033988749895
    hue = 0

def hsv2rgb(h, s, v):
    if s == 0.0:
        return (v, v, v)

def post_process(outputs, target_sizes, iou_threshold, confidence_threshold):
    out_logits, out_bbox = outputs['labels'], outputs['dets']

def saveImage(image, predictions, className, destPath, fileName, colors, original_size, resized_size):
    num_detections, detected_boxes, detected_scores, detected_labels = predictions

def infer_onnx(sess, image_dir, output_dir, threshold, iou_threshold, class_names):
    os.makedirs(output_dir, exist_ok=True)
    log_file_path = os.path.join(output_dir, "inference_log.txt")

def main():
    args = parser_args()
    class_names = findClassNameYOLO(args.class_names)
    # No execution providers are specified here; with the CPU-only onnxruntime
    # package this session runs on the CPU.
    sess = nxrun.InferenceSession(args.path)
    infer_onnx(sess, args.image_dir, args.output_dir, args.threshold, args.iou_threshold, class_names)

if __name__ == '__main__':
    main()
=================================================
Min Inference Time: 217.61 ms
Max Inference Time: 295.26 ms
Avg Inference Time: 236.82 ms
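As a reference for measuring latency, here is a minimal, self-contained sketch that times single-image inference after a few warm-up runs, so CUDA kernel loading does not inflate the first measurements. The model path, the single 1x3x640x640 input, and the dummy data are assumptions; an exported detector may expect additional inputs (e.g. original image sizes), so adjust the feed dictionary to match the actual model.

import time
import numpy as np
import onnxruntime as ort

# Placeholder path; substitute the converted model.
sess = ort.InferenceSession("model.onnx",
                            providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
input_name = sess.get_inputs()[0].name
dummy = np.random.rand(1, 3, 640, 640).astype(np.float32)

# Warm-up runs before timing.
for _ in range(5):
    sess.run(None, {input_name: dummy})

times = []
for _ in range(50):
    start = time.perf_counter()
    sess.run(None, {input_name: dummy})
    times.append((time.perf_counter() - start) * 1000.0)

print(f"Min: {min(times):.2f} ms  Max: {max(times):.2f} ms  Avg: {sum(times)/len(times):.2f} ms")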