Open TekayaNidham opened 4 years ago
@lukeyeager
I added this to the clusterDetection class, and it's returning zeros for the coverage and then freezing:
# Module-level requirements assumed by this code:
import math
import numpy as np
import cv2 as cv

MAX_BOXES = 50  # used by cluster() below; 50 matches the output printed further down

def gridbox_to_boxes(net_cvg, net_boxes, self):
    im_sz_x = self.image_size_x
    im_sz_y = self.image_size_y
    stride = self.stride

    grid_sz_x = int(im_sz_x / stride)
    grid_sz_y = int(im_sz_y / stride)

    boxes = []
    cvgs = []

    cell_width = im_sz_x / grid_sz_x
    cell_height = im_sz_y / grid_sz_y

    cvg_val = net_cvg[0, 0:grid_sz_y, 0:grid_sz_x]

    if (self.is_groundtruth):
        mask = (cvg_val > 0)
    else:
        mask = (cvg_val >= self.gridbox_cvg_threshold)

    coord = np.where(mask == 1)

    y = np.asarray(coord[0])
    x = np.asarray(coord[1])

    mx = x * cell_width
    my = y * cell_height

    x1 = (np.asarray([net_boxes[0][y[i]][x[i]] for i in list(range(x.size))]) + mx)
    y1 = (np.asarray([net_boxes[1][y[i]][x[i]] for i in list(range(x.size))]) + my)
    x2 = (np.asarray([net_boxes[2][y[i]][x[i]] for i in list(range(x.size))]) + mx)
    y2 = (np.asarray([net_boxes[3][y[i]][x[i]] for i in list(range(x.size))]) + my)

    boxes = np.transpose(np.vstack((x1, y1, x2, y2)))
    cvgs = np.transpose(np.vstack((x, y, np.asarray(
        [cvg_val[y[i]][x[i]] for i in list(range(x.size))]))))
    return boxes, cvgs, mask
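Since gridbox_to_boxes only keeps grid cells whose coverage clears gridbox_cvg_threshold, an all-zero result usually means the mask is empty. A quick sanity check I would run on the raw coverage blob before clustering (debug_coverage is just a hypothetical helper, not part of DIGITS):

import numpy as np

def debug_coverage(net_cvg, threshold):
    """If max coverage never reaches the threshold, the mask is empty and no boxes survive."""
    cvg = np.asarray(net_cvg)
    print("coverage shape:", cvg.shape)
    print("min %.4f  max %.4f  mean %.4f" % (cvg.min(), cvg.max(), cvg.mean()))
    print("grid cells >= threshold:", int((cvg >= threshold).sum()))

# e.g. debug_coverage(net_cvg[0], self.gridbox_cvg_threshold)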
def vote_boxes(propose_boxes, propose_cvgs, mask, self):
    """ Vote amongst the boxes using OpenCV's built-in clustering routine.
    """
    detections_per_image = []
    if not propose_boxes.any():
        return detections_per_image

    ######################################################################
    # GROUP RECTANGLES Clustering
    ######################################################################
    # groupRectangles expects [x, y, w, h], so convert from [x1, y1, x2, y2]
    nboxes, weights = cv.groupRectangles(
        [[e[0], e[1], e[2] - e[0], e[3] - e[1]] for e in np.array(propose_boxes).tolist()],
        self.gridbox_rect_thresh,
        self.gridbox_rect_eps)
    if len(nboxes):
        for rect, weight in zip(nboxes, weights):
            if rect[3] >= self.min_height:
                confidence = math.log(weight[0])
                detection = [rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3], confidence]
                detections_per_image.append(detection)

    return detections_per_image
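Another way to end up with an empty detection list is cv.groupRectangles itself: it takes [x, y, w, h] rectangles plus a groupThreshold, and any rectangle that cannot be merged with at least groupThreshold others is discarded entirely (gridbox_rect_thresh plays the role of groupThreshold here). A tiny standalone illustration with made-up integer boxes:

import cv2 as cv

rects = [[10, 10, 50, 50], [12, 11, 49, 51], [300, 300, 40, 40]]  # two overlapping + one isolated
grouped, weights = cv.groupRectangles(rects, 1, 0.2)  # groupThreshold=1, eps=0.2
print(grouped)  # only the merged pair survives; the isolated box is dropped
print(weights)  # how many input rectangles went into each surviving cluster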
def cluster(self, net_cvg, net_boxes):
    """
    Read output of inference and turn into Bounding Boxes
    """
    batch_size = net_cvg.shape[0]
    boxes = np.zeros([batch_size, MAX_BOXES, 5])

    for i in range(batch_size):

        cur_cvg = net_cvg[i]
        cur_boxes = net_boxes[i]

        if (self.is_groundtruth):
            # Gather proposals that pass a threshold -
            propose_boxes, propose_cvgs, mask = gridbox_to_boxes(
                cur_cvg, cur_boxes, self)
            # Remove duplicates from ground truth
            new_array = list({tuple(row) for row in propose_boxes})
            boxes_cur_image = np.asarray(new_array, dtype=np.float16)
        else:
            # Gather proposals that pass a threshold -
            propose_boxes, propose_cvgs, mask = gridbox_to_boxes(cur_cvg, cur_boxes, self)
            # Vote across the proposals to get bboxes
            boxes_cur_image = vote_boxes(propose_boxes, propose_cvgs, mask, self)
            boxes_cur_image = np.asarray(boxes_cur_image, dtype=np.float16)

        if (boxes_cur_image.shape[0] != 0):
            [r, c] = boxes_cur_image.shape
            boxes[i, 0:r, 0:c] = boxes_cur_image

    print(boxes)
    return boxes
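To drive these functions outside of caffe, the self argument only needs to carry the attributes they read, which normally come from the cluster layer's param_str in the prototxt. A rough sketch of how I would wire it up; every numeric value below is a placeholder to be replaced with the values from your own deploy.prototxt:

import numpy as np
from types import SimpleNamespace

params = SimpleNamespace(
    image_size_x=1248,          # placeholder: your network input width
    image_size_y=384,           # placeholder: your network input height
    stride=16,                  # DetectNet's usual stride
    gridbox_cvg_threshold=0.6,  # placeholder coverage threshold
    gridbox_rect_thresh=1,      # placeholder groupRectangles groupThreshold
    gridbox_rect_eps=0.2,       # placeholder groupRectangles eps
    min_height=10,              # placeholder minimum box height in pixels
    is_groundtruth=False,
)

# net_cvg:   (batch, 1, H/stride, W/stride) coverage blob
# net_boxes: (batch, 4, H/stride, W/stride) bbox blob
# boxes = cluster(params, net_cvg, net_boxes)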
(my_env) milos@milos-FX503VD:~/testmodel$ python dl.py \
> --prototxt deploy.prototxt \
> --model snapshot_iter_1900.caffemodel \
> --image 1.png
[INFO] loading model...
[INFO] computing object detections...
[[[0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0.]
  ...
  [0. 0. 0. 0. 0.]]]
(50 rows of MAX_BOXES x 5, all zeros)
Hello everyone, I'm currently having a problem implementing an object detection model trained with NVIDIA DIGITS, and I always get this error:

cv2.error: OpenCV(4.2.0) /io/opencv/modules/dnn/src/dnn.cpp:562: error: (-2:Unspecified error) Can't create layer "cluster" of type "" in function 'getLayerInstance'
NVCaffe version: 0.15.14
The Python code I'm using is from PyImageSearch:
deploy.prototxt:
https://answers.opencv.org/question/189822/assertion-error-with-custom-trained-caffe/ : I applied what this says and it still didn't work, unless I didn't do it correctly. If I got it wrong, can you please tell me what I should do?
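For what it's worth, cv2.dnn.readNetFromCaffe fails on "cluster" because it is a caffe Python layer (hence type ""), which OpenCV's DNN module cannot instantiate. What that OpenCV answer boils down to is: delete the cluster layer (and any other Python layers) from deploy.prototxt so the network ends at the raw coverage and bbox outputs, then run the clustering code above on those blobs. A rough sketch, assuming the output blob names are coverage and bboxes as in the stock DetectNet deploy file, and that the input size, scale and mean match your training setup:

import cv2 as cv

net = cv.dnn.readNetFromCaffe("deploy.prototxt", "snapshot_iter_1900.caffemodel")

image = cv.imread("1.png")
blob = cv.dnn.blobFromImage(image, 1.0, (1248, 384))  # placeholder size/scale, use your network's
net.setInput(blob)

# Ask for both raw DetectNet outputs instead of the removed "cluster" layer
net_cvg, net_boxes = net.forward(["coverage", "bboxes"])

boxes = cluster(params, net_cvg, net_boxes)  # params as in the earlier sketch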