Xilinx / QNN-MO-PYNQ

BSD 3-Clause "New" or "Revised" License
242 stars 114 forks source link

I want to use webcamera with tiny-yolo-image-loop #61

Open hiroyuki405 opened 4 years ago

hiroyuki405 commented 4 years ago

I want to use webcamera with tiny-yolo-image-loop. I have rebuilt darknet by adding ndarray_image following the steps below.

I modified tiny-yolo-image-loop as follows

out_dim = net['conv7']['output'][1]
out_ch = net['conv7']['output'][0]
img_folder = './yoloimages/'
file_name_out = c_char_p("/home/xilinx/jupyter_notebooks/qnn/detection".encode())
file_name_probs = c_char_p("/home/xilinx/jupyter_notebooks/qnn/probabilities.txt".encode())
file_names_voc = c_char_p("/opt/darknet/data/voc.names".encode())
tresh = c_float(0.3)
tresh_hier = c_float(0.5)
darknet_path = c_char_p("/opt/darknet/".encode())

conv_output = classifier.get_accel_buffer(out_ch, out_dim)
def nparray_to_image(img):
    data = img.ctypes.data_as(POINTER(c_ubyte))
    image = ndarray_image(data, img.ctypes.shape, img.ctypes.strides)
    print("nparray")
    return image
cap = cv2.VideoCapture(0) 
file_name="test.jpg"
while(1):
    ret , img = cap.read()
    cv2.waitKey(1)
    if not ret:
        print("read error")
        continue
#         cv2_im = cv2.cvtColor(cv2_im,cv2.COLOR_BGR2RGB)
#     img = PIL_Image.fromarray(cv2_im)
    print("start")
    img = nparray_to_image(img)
    img_letterbox = letterbox_image(img,416,416)
    img_copy = np.copy(np.ctypeslib.as_array(img_letterbox.data, (3,416,416)))
    img_copy = np.swapaxes(img_copy, 0,2)
    free_image(img)
    free_image(img_letterbox)

    #First convolution layer in sw
    if len(img_copy.shape)<4:
        img_copy = img_copy[np.newaxis, :, :, :]

    conv0_ouput = utils.conv_layer(img_copy,conv0_weights_correct,b=conv0_bias_broadcast,stride=2,padding=1)
    conv0_output_quant = conv0_ouput.clip(0.0,4.0)
    conv0_output_quant = utils.quantize(conv0_output_quant/4,3)

    #Offload to hardware
    conv_input = classifier.prepare_buffer(conv0_output_quant*7);
    classifier.inference(conv_input, conv_output)
    conv7_out = classifier.postprocess_buffer(conv_output)

    #Last convolution layer in sw
    conv7_out = conv7_out.reshape(out_dim,out_dim,out_ch)
    conv7_out = np.swapaxes(conv7_out, 0, 1) # exp 1
    if len(conv7_out.shape)<4:
        conv7_out = conv7_out[np.newaxis, :, :, :] 

    conv8_output = utils.conv_layer(conv7_out,conv8_weights_correct,b=conv8_bias_broadcast,stride=1)  
    conv8_out = conv8_output.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
    print("conv")
    #Draw detection boxes
    lib.forward_region_layer_pointer_nolayer(net_darknet,conv8_out)
    #lib.draw_detection_python(net_darknet, file_name, tresh, tresh_hier,file_names_voc, darknet_path, file_name_out, file_name_probs);

    #Display result
    IPython.display.clear_output(1)
    file_content = open(file_name_probs.value,"r").read().splitlines()
    detections = []
    print("for ")
    for line in file_content[0:]:
        name, probability = line.split(": ")
        detections.append((probability, name))
    for det in sorted(detections, key=lambda tup: tup[0], reverse=True):
        print("class: {}\tprobability: {}".format(det[1], det[0]))
    print("res")
    res = Image.open(file_name_out.value.decode() + ".png")
    print("display")
    display(res)

But I can't see anything in the output, what do I do? What's the cause?