66yurimi99 / Z-ERO

졸음 운전 사고 방지를 위한 DMS 시스템과 자율주행 시스템(ZERO; Zㅔ발 Eyes Re-Open)
0 stars 0 forks source link

Feat: [FCA] Object Detection 기능 구현 #56

Closed YuNayeong closed 1 year ago

YuNayeong commented 1 year ago

Description

custom model을 이용한 object detection(Car) 기능 구현입니다.

Todo

Build the model (custom YOLOv5)

def build_model(is_cuda, model_path='/home/intel/AI/convert2Yolo/yolov5/runs/train/exp9/weights/best.onnx'):
    """Load the custom YOLOv5 ONNX model into an OpenCV DNN network.

    Args:
        is_cuda: when truthy, prefer the CUDA backend with FP16 target;
            otherwise fall back to the OpenCV CPU backend.
        model_path: path to the exported .onnx weights. Defaults to the
            original hard-coded training-run location so existing callers
            are unaffected.

    Returns:
        A configured cv2.dnn network ready for setInput()/forward().
    """
    net = cv2.dnn.readNet(model_path)
    if is_cuda:
        print("use CUDA")
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA_FP16)
    else:
        print("use CPU")
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
    return net

# Network input resolution and detection thresholds.
INPUT_WIDTH = 640            # network input width in pixels
INPUT_HEIGHT = 640           # network input height in pixels
NMS_THRESHOLD = 0.4          # IoU threshold for non-maximum suppression
CONFIDENCE_THRESHOLD = 0.4   # minimum objectness score to keep a detection

Pre-processing and detection

def detect(image, net):
    """Run a single forward pass of *net* on *image* and return the raw predictions."""
    # Scale pixel values to [0, 1], resize to the network input size,
    # and swap BGR -> RGB; no cropping.
    blob = cv2.dnn.blobFromImage(
        image, 1 / 255.0, (INPUT_WIDTH, INPUT_HEIGHT), swapRB=True, crop=False
    )
    net.setInput(blob)
    return net.forward()

load class names

def load_classes(path='/home/intel/AI/convert2Yolo/yolov5/new_cvat/class.names'):
    """Read class names, one per line, and return them as a list of strings.

    Args:
        path: class-name file to read. Defaults to the original hard-coded
            location so existing callers are unaffected.

    Returns:
        List of class names with surrounding whitespace stripped.
    """
    with open(path, 'r') as f:
        return [cname.strip() for cname in f.readlines()]

# Loaded once at import time; indexed by class id when labelling detections.
class_list = load_classes()

Decode the output of object detection

def wrap_detection(input_image, output_data):
    """Decode raw YOLOv5 output rows into class ids, confidences and boxes.

    Args:
        input_image: the original frame the blob was built from; only its
            shape is used to rescale boxes back to image coordinates.
        output_data: 2-D array of prediction rows, one per candidate box,
            laid out as [cx, cy, w, h, objectness, per-class scores...].

    Returns:
        Tuple (result_class_ids, result_confidences, result_boxes) for the
        detections surviving confidence filtering and NMS; each box is an
        int array [left, top, width, height] in image pixels.
    """
    class_ids = []
    confidences = []
    boxes = []

    rows = output_data.shape[0]

    # BUG FIX: a numpy image's shape is (height, width, channels) — the
    # original unpacked it as (width, height, _), swapping the rescale
    # factors for any non-square frame.
    image_height, image_width, _ = input_image.shape

    # Scale factors from network input space back to original image space.
    x_factor = image_width / INPUT_WIDTH
    y_factor = image_height / INPUT_HEIGHT

    for r in range(rows):
        row = output_data[r]
        confidence = row[4]  # objectness score
        # Same value as the original hard-coded 0.4, now tied to the constant.
        if confidence >= CONFIDENCE_THRESHOLD:
            classes_scores = row[5:]
            # minMaxLoc returns (min_val, max_val, min_loc, max_loc);
            # max_loc[1] is the index of the best-scoring class.
            _, _, _, max_indx = cv2.minMaxLoc(classes_scores)
            class_id = max_indx[1]
            if (classes_scores[class_id] > .25):
                confidences.append(confidence)
                class_ids.append(class_id)
                # Convert centre-based (cx, cy, w, h) to a top-left
                # corner box in image pixels.
                x, y, w, h = row[0].item(), row[1].item(), row[2].item(), row[3].item()
                left = int((x - 0.5 * w) * x_factor)
                top = int((y - 0.5 * h) * y_factor)
                width = int(w * x_factor)
                height = int(h * y_factor)
                boxes.append(np.array([left, top, width, height]))

    # NOTE(review): these thresholds (0.25, 0.45) differ from the module-level
    # CONFIDENCE_THRESHOLD / NMS_THRESHOLD constants — confirm which pair is
    # intended before unifying them.
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.25, 0.45)

    result_class_ids = []
    result_confidences = []
    result_boxes = []

    # Older OpenCV builds return NMS indices as a column vector ([[i], ...]);
    # flattening makes the loop work on every version.
    for i in np.array(indexes).flatten():
        result_confidences.append(confidences[i])
        result_class_ids.append(class_ids[i])
        result_boxes.append(boxes[i])

    return result_class_ids, result_confidences, result_boxes

""" ... skip ... """

in the loop

# Draw a labelled bounding box on the frame for every detection kept by NMS.
class_ids, confidences, boxes = wrap_detection(inputImage, outs[0])

box_color = (51, 255, 196)  # BGR
for classid, confidence, box in zip(class_ids, confidences, boxes):
    left, top = box[0], box[1]
    # Detection outline.
    cv2.rectangle(frame, box, box_color, 2)
    # Filled strip above the box as a label background.
    cv2.rectangle(frame, (left, top - 20), (left + box[2], top), box_color, -1)
    # Class name in black on the strip.
    cv2.putText(frame, class_list[classid], (left, top - 5),    cv2.FONT_HERSHEY_SIMPLEX, .5, (0,0,0))        

<br>

## 산출물

- object detection test code
  [yolo_test.py.zip](https://github.com/66yurimi99/Z-ERO/files/13479517/yolo_test.py.zip)

## 참고자료
- opencv Deep Neural Network module
  https://docs.opencv.org/3.4/d6/d0f/group__dnn.html