mikel-brostrom / boxmot

BoxMOT: pluggable SOTA tracking modules for segmentation, object detection and pose estimation models
GNU Affero General Public License v3.0
6.79k stars 1.72k forks source link

val benchmark==MOT20 BUG #1336

Closed XDUbeginner closed 8 months ago

XDUbeginner commented 9 months ago

Search before asking

Yolo Tracking Component

Evaluation

Bug

When I run val.py, it reports this error: ![Uploading 微信图片_20240305124334.png…]() (note: the screenshot upload did not finish, so the image link is broken)

Environment

Python == 3.9 Package Version Editable project location


absl-py 1.4.0 addict 2.4.0 aliyun-python-sdk-core 2.14.0 aliyun-python-sdk-kms 2.16.2 annotated-types 0.6.0 appdirs 1.4.4 beautifulsoup4 4.12.2 boxmot 10.0.52 e:\yolo_tracking-master2024_3_4\yolo_tracking-master brotlipy 0.7.0 cachetools 5.3.0 certifi 2023.5.7 cffi 1.15.1 cfgv 3.4.0 charset-normalizer 3.1.0 click 8.1.7 colorama 0.4.6 coloredlogs 15.0.1 contourpy 1.0.7 crcmod 1.7 cryptography 40.0.2 cycler 0.11.0 Cython 3.0.8 distlib 0.3.8 easydict 1.10 efficientnet-pytorch 0.7.1 einops 0.6.1 filelock 3.13.1 filterpy 1.4.5 flatbuffers 23.5.9 fonttools 4.39.3 fsspec 2023.6.0 ftfy 6.1.3 future 0.18.3 gdown 5.1.0 gitdb 4.0.10 GitPython 3.1.31 google-auth 2.18.0 google-auth-oauthlib 1.0.0 grad-cam 1.4.8 grpcio 1.54.0 huggingface-hub 0.15.1 humanfriendly 10.0 identify 2.5.35 idna 3.4 importlib-metadata 6.6.0 importlib-resources 5.12.0 Jinja2 3.1.3 jmespath 0.10.0 joblib 1.3.1 kiwisolver 1.4.4 lap 0.4.0 lapx 0.5.5 loguru 0.7.0 Markdown 3.4.3 markdown-it-py 3.0.0 MarkupSafe 2.1.2 matplotlib 3.7.1 mdurl 0.1.2 mkl-service 2.4.0 mmcv 2.1.0 mmengine 0.10.3 model-index 0.1.11 motmetrics 1.4.0 mpmath 1.3.0 networkx 3.2.1 nodeenv 1.8.0 numpy 1.24.4 nvidia-cublas-cu11 11.11.3.6 oauthlib 3.2.2 onnxruntime-gpu 1.14.1 opencv-python 4.7.0.72 opendatalab 0.0.10 openmim 0.3.9 openxlab 0.0.34 ordered-set 4.1.0 oss2 2.17.0 packaging 23.1 pandas 2.0.1 Pillow 9.5.0 pip 23.1.2 platformdirs 4.2.0 pre-commit 3.6.2 protobuf 4.23.0 psutil 5.9.5 py-cpuinfo 9.0.0 pyasn1 0.5.0 pyasn1-modules 0.3.0 pycocotools 2.0.7 pycparser 2.21 pycryptodome 3.20.0 pydantic 2.6.3 pydantic_core 2.16.3 Pygments 2.15.1 pyOpenSSL 23.1.1 pyparsing 3.1.0b1 pyreadline3 3.4.1 PySocks 1.7.1 python-dateutil 2.8.2 pytz 2023.3 pywin32 306 PyYAML 6.0 regex 2023.6.3 requests 2.28.2 requests-oauthlib 1.3.1 rich 13.4.2 rsa 4.9 safetensors 0.3.1 scikit-learn 1.3.0 scipy 1.10.1 seaborn 0.12.2 sentry-sdk 1.22.2 setuptools 60.2.0 six 1.16.0 smmap 5.0.0 soupsieve 2.4.1 sympy 1.12 tabulate 0.9.0 tensorboard 2.13.0 
tensorboard-data-server 0.7.0 termcolor 2.3.0 thop 0.1.1.post2209072238 threadpoolctl 3.2.0 tidecv 1.0.1 timm 0.6.13 tomli 2.0.1 torch 1.13.1+cu117 torchaudio 0.13.1+cu117 torchvision 0.14.1+cu117 tqdm 4.65.0 ttach 0.0.3 typing_extensions 4.10.0 tzdata 2023.3 ultralytics 8.0.228 urllib3 1.26.15 virtualenv 20.25.1 wcwidth 0.2.13 Werkzeug 2.3.4 wheel 0.40.0 win-inet-pton 1.1.0 win32-setctime 1.1.0 xmltodict 0.13.0 yacs 0.1.8 yapf 0.40.0 zipp 3.15.0

Minimal Reproducible Example

import argparse
import re
import subprocess
import sys
from pathlib import Path

from ultralytics.utils.files import increment_path

from boxmot.utils import EXAMPLES, EXPERIMENTATION, ROOT, WEIGHTS
from tracking.utils import (
    download_mot_eval_tools,
    download_mot_dataset,
    unzip_mot_dataset,
    eval_setup,
)

def parse_mot_results(results): """Extract the COMBINED HOTA, MOTA, IDF1 from the results generate by the run_mot_challenge.py script.

    Args:
        str: mot_results

    Returns:
        (dict): {'HOTA': x, 'MOTA':y, 'IDF1':z}
    """
    combined_results = results.split('COMBINED')[2:-1]
    # robust way of getting first ints/float in string
    combined_results = [float(re.findall("[-+]?(?:\d*\.*\d+)", f)[0]) for f in combined_results]
    # pack everything in dict
    combined_results = {key: value for key, value in zip(['HOTA', 'MOTA', 'IDF1'], combined_results)}
    return combined_results

def trackeval(
    args,
    seq_paths,
    save_dir,
    MOT_results_folder,
    gt_folder,
    metrics=("HOTA", "CLEAR", "Identity"),
):
    """Run the TrackEval run_mot_challenge.py script on tracker results.

    Args:
        args: parsed options; only ``args.exp_folder_path`` is read here.
        seq_paths (list): paths to the evaluated sequence directories; the
            parent folder name of each is used as the sequence name.
        save_dir: unused here; kept for interface compatibility.
        MOT_results_folder: unused here; kept for interface compatibility.
        gt_folder: folder containing the ground-truth annotations.
        metrics (sequence of str): metric families to evaluate. Defaults to
            ("HOTA", "CLEAR", "Identity"); a tuple is used so the default
            argument is immutable.

    Returns:
        str: the standard output of the evaluation script.
    """
    # Sequence names expected by --SEQ_INFO are the parent folder names.
    seq_names = [seq_path.parent.name for seq_path in seq_paths]

    # Build the command line for the subprocess call. Do NOT rebind the
    # name `args` here (as the previous version did): that shadowed the
    # caller's options object with the argv list.
    cmd = [
        sys.executable, EXPERIMENTATION / 'val_utils' / 'scripts' / 'run_mot_challenge.py',
        "--GT_FOLDER", str(gt_folder),
        "--BENCHMARK", "",
        "--TRACKERS_FOLDER", args.exp_folder_path,
        "--TRACKERS_TO_EVAL", "",
        "--SPLIT_TO_EVAL", "train",
        "--METRICS", *metrics,
        "--USE_PARALLEL", "True",
        "--TRACKER_SUB_FOLDER", "",
        "--NUM_PARALLEL_CORES", str(4),
        "--SKIP_SPLIT_FOL", "True",
        "--SEQ_INFO", *seq_names
    ]

    # Execute the evaluation script and capture both output streams.
    p = subprocess.Popen(
        args=cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True
    )
    stdout, stderr = p.communicate()

    # Surface the results (and any errors) to the console.
    print("Standard Output:\n", stdout)
    if stderr:
        print("Standard Error:\n", stderr)
    return stdout

def parse_opt():
    """Build and parse the command-line options for tracker evaluation.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--yolo-model', type=Path, default=WEIGHTS / 'yolov8_best.pt',
                        help='yolo model path')
    parser.add_argument('--reid-model', type=Path, default=WEIGHTS / 'osnet_x1_0_msmt17.pt',
                        help='reid model path')
    parser.add_argument('--tracking-method', type=str, default='botsort',
                        help='deepocsort, botsort, strongsort, ocsort, bytetrack')
    parser.add_argument('--source', type=str,
                        default='F:/all_dataset/VisDrone-Track/VisDrone2019-MOT-test-challenge/sequences/uav0000320_03910_v',
                        help='file/dir/URL/glob, 0 for webcam')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640],
                        help='inference size h,w')
    parser.add_argument('--conf', type=float, default=0.1,
                        help='confidence threshold')
    parser.add_argument('--iou', type=float, default=0.1,
                        help='intersection over union (IoU) threshold for NMS')
    parser.add_argument('--device', default='',
                        help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--show', action='store_true',
                        help='display tracking video results')
    parser.add_argument('--save', action='store_true',
                        help='save video tracking results')
    # class 0 is person, 1 is bicycle, 2 is car... 79 is oven
    parser.add_argument('--classes', nargs='+', type=int,
                        help='filter by class: --classes 0, or --classes 0 2 3')
    parser.add_argument('--project', default=ROOT / 'runs' / 'mot',
                        help='save results to project/name')
    parser.add_argument('--name', default='yolov8_best_osnet_x1_0_msmt17',
                        help='save results to project/name')
    parser.add_argument('--dets', type=str, default='yolov8_best',
                        help='the folder name under project to load the detections from')
    parser.add_argument('--embs', type=str, default='osnet_x1_0_msmt17',
                        help='the folder name under project/dets to load the embeddings from')
    parser.add_argument('--exist-ok', action='store_true',
                        help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true',
                        help='use FP16 half-precision inference')
    parser.add_argument('--vid-stride', type=int, default=1,
                        help='video frame-rate stride')
    parser.add_argument('--show-labels', action='store_false',
                        help='either show all or only bboxes')
    parser.add_argument('--show-conf', action='store_false',
                        help='hide confidences when show')
    parser.add_argument('--save-txt', action='store_true', default=True,
                        help='save tracking results in a txt file')
    parser.add_argument('--save-id-crops', action='store_true',
                        help='save each crop to its respective id folder')
    parser.add_argument('--save-mot', action='store_true',
                        help='save tracking results in a single txt file')
    parser.add_argument('--line-width', default=None, type=int,
                        help='The line width of the bounding boxes. If None, it is scaled to the image size.')
    parser.add_argument('--per-class', default=False, action='store_true',
                        help='not mix up classes when tracking')
    parser.add_argument('--verbose', default=True, action='store_true',
                        help='print results per frame')
    parser.add_argument('--agnostic-nms', default=False, action='store_true',
                        help='class-agnostic NMS')
    parser.add_argument('--benchmark', type=str, default='MOT20',
                        help='MOT16, MOT17, MOT20')
    parser.add_argument('--split', type=str, default='train',
                        help='existing project/name ok, do not increment')

    opt = parser.parse_args()
    return opt

def run_trackeval(opt):
    """Download the evaluation tools and dataset if needed, run TrackEval
    and return the combined tracking metrics.

    Args:
        opt (argparse.Namespace | None): parsed options. When None, the
            options are read from the command line and a fresh experiment
            folder is created under ``opt.project``.

    Returns:
        dict: {'HOTA': x, 'MOTA': y, 'IDF1': z}
    """
    if opt is None:
        opt = parse_opt()
        # Experiment folder name is <dets>_<embs>_<tracking_method>.
        # NOTE(review): the pasted snippet had `+ "" +` for the first
        # separator — the "_" was eaten by markdown italics; restored here.
        exp_folderpath = opt.project / (
            str(opt.dets) + "_" + str(opt.embs) + "_" + str(opt.tracking_method)
        )
        exp_folder_path = increment_path(
            path=exp_folderpath, mkdir=True, sep="", exist_ok=opt.exist_ok
        )
        opt.exp_folder_path = exp_folder_path
    else:
        # Caller supplied options: never increment past an existing folder.
        opt.exist_ok = False

    val_tools_path = EXPERIMENTATION / 'val_utils'
    download_mot_eval_tools(val_tools_path)
    zip_path = download_mot_dataset(val_tools_path, opt.benchmark)
    unzip_mot_dataset(zip_path, val_tools_path, opt.benchmark)
    seq_paths, save_dir, MOT_results_folder, gt_folder = eval_setup(opt, val_tools_path)
    results = trackeval(opt, seq_paths, save_dir, MOT_results_folder, gt_folder)
    combined_results = parse_mot_results(results)
    print(combined_results)
    return combined_results

if name == "main": run_trackeval(None)

XDUbeginner commented 9 months ago

Pictures of the above bugs

XDUbeginner commented 9 months ago

微信图片_20240305124334

github-actions[bot] commented 8 months ago

👋 Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. Feel free to inform us of any other issues you discover or feature requests that come to mind in the future. Pull Requests (PRs) are also always welcomed!