import yolov7
# load pretrained or custom model
model = yolov7.load('yolov7.pt')
#model = yolov7.load('kadirnar/yolov7-v0.1', hf_model=True)
# set model parameters
model.conf = 0.25 # NMS confidence threshold
model.iou = 0.45 # NMS IoU threshold
# model.classes = None # (optional list) filter by class
model.classes = ["road", "sidewalk", "person", "rider", "motorbike", "bicycle", "car", "truck", "bus", "train", "wall", "fence", "traffic_sign", "traffic_light", "pole", "building", "vegetation", "sky"]
# set image
imgs = '/Users/sc0rp10n/MyStuff/college/cv/cv-project/IDD_Detection_Lite/JPEGImages/highquality_16k/BLR-2018-04-02_16-27-59/0000427.jpg'
# perform inference
# results = model(imgs)
# inference with larger input size and test time augmentation
results = model(imgs, size=1280, augment=True)
print(len(results))
print(results.pred[0].shape)
print(results)
# parse results
# predictions = results.pred[0]
# boxes = predictions[:, :4] # x1, y1, x2, y2
# scores = predictions[:, 4]
# categories = predictions[:, 5]
# show detection bounding boxes on image
results.show()
error msg
Fusing layers...
/Users/sc0rp10n/miniconda3/envs/cv/lib/python3.10/site-packages/torch/functional.py:512: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at /Users/runner/work/_temp/anaconda/conda-bld/pytorch_1712608659634/work/aten/src/ATen/native/TensorShape.cpp:3588.)
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
Traceback (most recent call last):
File "/Users/sc0rp10n/MyStuff/college/cv/cv-project/test2.py", line 20, in <module>
results = model(imgs, size=1280, augment=True)
File "/Users/sc0rp10n/miniconda3/envs/cv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/Users/sc0rp10n/miniconda3/envs/cv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "/Users/sc0rp10n/miniconda3/envs/cv/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/Users/sc0rp10n/miniconda3/envs/cv/lib/python3.10/site-packages/yolov7/models/common.py", line 951, in forward
y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS
File "/Users/sc0rp10n/miniconda3/envs/cv/lib/python3.10/site-packages/yolov7/utils/general.py", line 668, in non_max_suppression
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
ValueError: too many dimensions 'str'
my code
error msg