I am currently working on a project using a Jetson Nano, where I am trying to perform inference with TensorRT (YOLOv5 object detection) inside a ROS Melodic node. I have encountered a Bus Error (core dumped) / Segmentation Fault (core dumped), which seems to arise from a memory conflict between ROS and TensorRT. I am seeking advice or solutions from anyone who might have faced and resolved a similar issue.
I have provided the code to my inference node below. I would greatly appreciate any insights, suggestions, or solutions.
I am using the following node (camera.py) to provide the input (both the inference node above and this node are in the same ROS package):
"""camera.py -- reads a video, runs YOLOv5-TensorRT inference per frame,
prints detections and per-frame FPS, then reports the average FPS."""

import sys
import cv2
import imutils
import time
from yoloDet import YoloTRT

# Use path for library and engine file
model = YoloTRT(library="yolov5/build/libmyplugins.so",
                engine="yolov5/build/yolov5s_1504.engine",
                conf=0.5, yolo_ver="v5")

cap = cv2.VideoCapture("./DJI_trimmed.mp4")
frame_process_time = []  # per-frame inference times, in seconds

while True:
    ret, frame = cap.read()
    if not ret:
        print("Failed to read the frame or end of video reached")
        break
    # The engine was built for 1504-wide input; resize before inference.
    frame = imutils.resize(frame, width=1504)
    detections, t = model.Inference(frame)
    for obj in detections:
        print(obj['class'], obj['conf'], obj['box'])
    if t > 0:  # guard against ZeroDivisionError on a zero-length timing
        print("FPS: {} sec".format(1/t))
    frame_process_time.append(t)  # store the time taken for this frame
    cv2.imshow("Output", frame)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

# Calculate average FPS over all processed frames.
if frame_process_time:
    average_time_per_frame = sum(frame_process_time) / len(frame_process_time)
    average_fps = 1.0 / average_time_per_frame
    print(f"Average FPS: {average_fps}")
else:
    print("No frames were processed.")
Hello everyone,
I am currently working on a project using a Jetson Nano, where I am trying to perform inference with TensorRT (YOLOv5 object detection) inside a ROS Melodic node. I have encountered a Bus Error (core dumped) / Segmentation Fault (core dumped), which seems to arise from a memory conflict between ROS and TensorRT. I am seeking advice or solutions from anyone who might have faced and resolved a similar issue.
I have provided the code to my inference node below. I would greatly appreciate any insights, suggestions, or solutions.
Environment Details:
- JetPack: 4.6
- ROS: Melodic
- TensorRT: 8.2.1.8
- PyCUDA: 2019.1.2
- Torch: 1.10.0
- CUDA: 10.2
import cv2
import numpy as np
import tensorrt as trt
import pycuda.autoinit  # noqa: F401 -- import side effect creates the CUDA context
import random
import ctypes
import pycuda.driver as cuda
import time

# Flag required by TensorRT 7+ to build/deserialize explicit-batch networks.
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

# Host/device buffer lists shared by the inference code.
# NOTE(review): module-level mutable globals make this effectively
# single-instance and are a common source of crashes when another framework
# (e.g. a ROS node) touches the CUDA context -- consider moving these into
# YoloTRT.__init__ and managing the context explicitly with
# pycuda.driver rather than pycuda.autoinit.
host_inputs = []
cuda_inputs = []
host_outputs = []
cuda_outputs = []
bindings = []
class YoloTRT():
    """TensorRT wrapper for YOLOv5 inference.

    NOTE(review): this paste appears truncated -- only the beginning of the
    constructor is visible. The plugin-library / engine loading and the
    Inference() method that camera.py calls are missing from this excerpt.
    """

    def __init__(self, library, engine, conf, yolo_ver):
        # The paste showed `def init(...)`; the constructor must be the
        # __init__ dunder, otherwise YoloTRT(...) would never run this body.
        self.CONF_THRESH = conf       # detection confidence threshold
        self.IOU_THRESHOLD = 0.4      # NMS IoU threshold
        self.LEN_ALL_RESULT = 38001   # floats in the full output buffer
        self.LEN_ONE_RESULT = 38      # floats per detection record
        self.yolo_version = yolo_ver
        # VisDrone-style class names. (The commented-out COCO 80-class list
        # from the original is omitted here; restore it if the engine was
        # trained on COCO instead.)
        self.categories = ["pedestrian", "people", "bicycle", "car", "van",
                           "truck", "tricycle", "awning-tricycle", "bus",
                           "motor"]
        # presumably used just below (truncated) to deserialize the engine
        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
# camera.py -- the input node (same ROS package as the inference node above).
import sys
import cv2
import imutils
import time
from yoloDet import YoloTRT

# Use path for library and engine file
model = YoloTRT(library="yolov5/build/libmyplugins.so",
                engine="yolov5/build/yolov5s_1504.engine",
                conf=0.5, yolo_ver="v5")

cap = cv2.VideoCapture("./DJI_trimmed.mp4")
frame_process_time = []  # per-frame inference times, in seconds

while True:
    ret, frame = cap.read()
    if not ret:
        print("Failed to read the frame or end of video reached")
        break
    frame = imutils.resize(frame, width=1504)
    detections, t = model.Inference(frame)
    for obj in detections:
        print(obj['class'], obj['conf'], obj['box'])
    print("FPS: {} sec".format(1/t))
    # Without this append the list stays empty and the average-FPS branch
    # below can never run (this line was dropped in the pasted copy).
    frame_process_time.append(t)

cap.release()
cv2.destroyAllWindows()

# Calculate average FPS over all processed frames.
if frame_process_time:
    average_time_per_frame = sum(frame_process_time) / len(frame_process_time)
    average_fps = 1.0 / average_time_per_frame
    print(f"Average FPS: {average_fps}")
else:
    print("No frames were processed.")