It turns out I was drawing the boxes incorrectly. Using the code below solved my problem.
import cv2
import numpy as np

# One random colour per class index (80 COCO classes; 90 leaves headroom)
COLORS = np.random.randint(0, 255, size=(90, 3), dtype=np.uint8)

labels = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
    'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
    'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
    'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
    'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
    'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
    'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]

def get_label(class_id):
    # Class IDs are 1-based when index 0 is reserved for the background class
    return labels[class_id - 1]

def draw_detection(image, d, c, color, scale_factor_x, scale_factor_y):
    """Draw box and label for one detection."""
    label = get_label(c)
    ymin, xmin, ymax, xmax = d
    # Scale coordinates from the model input resolution to the original image size
    xmin, xmax = int(xmin * scale_factor_x), int(xmax * scale_factor_x)
    ymin, ymax = int(ymin * scale_factor_y), int(ymax * scale_factor_y)
    # Draw rectangle
    cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)
    # Draw label text
    label_position = (xmin + 5, ymin + 15)
    cv2.putText(image, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1, cv2.LINE_AA)
    return label

def annotate_image(image, results, thr=0.45, dim=640, offset_background=True):
    oh, ow, _ = image.shape
    rh, rw = oh / dim, ow / dim
    # The NMS output is grouped by class: each entry is an array of detections
    # with rows [ymin, xmin, ymax, xmax, score] in normalized coordinates
    for idx, class_detections in enumerate(results[list(results.keys())[0]][0]):
        if class_detections.shape[0] > 0:
            color = tuple(int(c) for c in COLORS[idx])
            for det in class_detections:
                if det[4] > thr:
                    if offset_background:
                        label = draw_detection(image, det[0:4] * dim, idx + 1, color, rw, rh)
                    else:
                        label = draw_detection(image, det[0:4] * dim, idx, color, rw, rh)
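For anyone hitting the same problem, here is a minimal usage sketch of annotate_image. The layout of the results dict (a single NMS output whose value wraps a list of per-class arrays with rows [ymin, xmin, ymax, xmax, score] in normalized coordinates) is inferred from the code above; the output key name, the blank test image, and the fake detections are placeholders, not real HailoRT output.

# Usage sketch with placeholder data (assumes the definitions above are in scope)
image = np.zeros((480, 640, 3), dtype=np.uint8)  # blank frame; replace with cv2.imread(...) for a real image

# Placeholder results: two "person" detections for class index 0 and empty
# arrays for the remaining 79 classes; rows are [ymin, xmin, ymax, xmax, score].
fake_results = {
    "yolov8n/yolov8_nms_postprocess": [  # placeholder output name
        [np.array([[0.10, 0.20, 0.60, 0.45, 0.92],
                   [0.15, 0.55, 0.70, 0.80, 0.51]], dtype=np.float32)]
        + [np.zeros((0, 5), dtype=np.float32) for _ in range(79)]
    ]
}

annotate_image(image, fake_results, thr=0.45, dim=640)
cv2.imwrite("annotated.jpg", image)

In real use, fake_results is simply the dict returned by the HailoRT inference call.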
For context, my original question: I'm using a custom-trained yolov8n model. Running the same model with the GStreamer pipeline provided in hailo-rpi5-examples works just fine, but running it with the code below gives random bounding boxes. Does infer_results require any additional postprocessing? What am I missing? Any help would be appreciated.
hailortcli parse-hef yolov8n.hef
gave the following output. Below is the code I ran.