For some reason, the process fails when I run ./tools/uniad_dist_eval.sh ./projects/configs/stage2_e2e/base_e2e.py ./ckpts/uniad_base_e2e.pth 1
It fails when it is trying to evaluate on class bus, giving me a TypeError: can only concatenate tuple (not "int") to tuple. Does anyone know what could be the cause of it?
Computing metrics for class bus...
Traceback (most recent call last):
File "./tools/test.py", line 261, in
main()
File "./tools/test.py", line 257, in main
print(dataset.evaluate(outputs, **eval_kwargs))
File "/cutin/UniAD/projects/mmdet3d_plugin/datasets/nuscenes_e2e_dataset.py", line 1051, in evaluate
results_dict = self._evaluate_single(
File "/cutin/UniAD/projects/mmdet3d_plugin/datasets/nuscenes_e2e_dataset.py", line 1171, in _evaluate_single
self.nusc_eval_track.main()
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/evaluate.py", line 204, in main
metrics, metric_data_list = self.evaluate()
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/evaluate.py", line 135, in evaluate
accumulate_class(class_name)
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/evaluate.py", line 131, in accumulate_class
curr_md = curr_ev.accumulate()
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/algo.py", line 123, in accumulate
thresholds, recalls = self.compute_thresholds(gt_box_count)
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/algo.py", line 308, in compute_thresholds
_, scores = self.accumulate_threshold(threshold=None)
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/algo.py", line 296, in accumulate_threshold
acc_merged = MOTAccumulatorCustom.merge_event_dataframes(accs)
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/mot.py", line 107, in merge_event_dataframes
next_frame_id = max(r.index.get_level_values(0).max() + 1,
TypeError: can only concatenate tuple (not "int") to tuple
/usr/local/lib/python3.8/dist-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated
and will be removed in future. Use torchrun.
Note that --use_env is set by default in torchrun.
If your script expects --local_rank argument to be set, please
change it to read from os.environ['LOCAL_RANK'] instead. See
https://pytorch.org/docs/stable/distributed.html#launch-utility for
further instructions
warnings.warn(
ERROR:torch.distributed.elastic.multiprocessing.api:failed (exitcode: 1) local_rank: 0 (pid: 6852) of binary: /usr/bin/python
Traceback (most recent call last):
File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.8/dist-packages/torch/distributed/launch.py", line 193, in
main()
File "/usr/local/lib/python3.8/dist-packages/torch/distributed/launch.py", line 189, in main
launch(args)
File "/usr/local/lib/python3.8/dist-packages/torch/distributed/launch.py", line 174, in launch
run(args)
File "/usr/local/lib/python3.8/dist-packages/torch/distributed/run.py", line 752, in run
elastic_launch(
File "/usr/local/lib/python3.8/dist-packages/torch/distributed/launcher/api.py", line 131, in __call__
return launch_agent(self._config, self._entrypoint, list(args))
File "/usr/local/lib/python3.8/dist-packages/torch/distributed/launcher/api.py", line 245, in launch_agent
raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
For some reason, the process fails when I run ./tools/uniad_dist_eval.sh ./projects/configs/stage2_e2e/base_e2e.py ./ckpts/uniad_base_e2e.pth 1
It fails when it is trying to evaluate on class bus, giving me a TypeError: can only concatenate tuple (not "int") to tuple. Does anyone know what could be the cause of it?
Computing metrics for class bus...
Traceback (most recent call last):
main()
File "./tools/test.py", line 257, in main
print(dataset.evaluate(outputs, **eval_kwargs))
File "/cutin/UniAD/projects/mmdet3d_plugin/datasets/nuscenes_e2e_dataset.py", line 1051, in evaluate
results_dict = self._evaluate_single(
File "/cutin/UniAD/projects/mmdet3d_plugin/datasets/nuscenes_e2e_dataset.py", line 1171, in _evaluate_single
self.nusc_eval_track.main()
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/evaluate.py", line 204, in main
metrics, metric_data_list = self.evaluate()
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/evaluate.py", line 135, in evaluate
accumulate_class(class_name)
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/evaluate.py", line 131, in accumulate_class
curr_md = curr_ev.accumulate()
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/algo.py", line 123, in accumulate
thresholds, recalls = self.compute_thresholds(gt_box_count)
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/algo.py", line 308, in compute_thresholds
_, scores = self.accumulate_threshold(threshold=None)
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/algo.py", line 296, in accumulate_threshold
acc_merged = MOTAccumulatorCustom.merge_event_dataframes(accs)
File "/usr/local/lib/python3.8/dist-packages/nuscenes/eval/tracking/mot.py", line 107, in merge_event_dataframes
next_frame_id = max(r.index.get_level_values(0).max() + 1,
TypeError: can only concatenate tuple (not "int") to tuple
/usr/local/lib/python3.8/dist-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated
and will be removed in future. Use torchrun.
Note that --use_env is set by default in torchrun.
If your script expects
File "./tools/test.py", line 261, in
--local_rank
argument to be set, please change it to read from os.environ['LOCAL_RANK']
instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions. warnings.warn( ERROR:torch.distributed.elastic.multiprocessing.api:failed (exitcode: 1) local_rank: 0 (pid: 6852) of binary: /usr/bin/python Traceback (most recent call last): File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main return _run_code(code, main_globals, None, File "/usr/lib/python3.8/runpy.py", line 87, in _run_code exec(code, run_globals) File "/usr/local/lib/python3.8/dist-packages/torch/distributed/launch.py", line 193, in
main()
File "/usr/local/lib/python3.8/dist-packages/torch/distributed/launch.py", line 189, in main
launch(args)
File "/usr/local/lib/python3.8/dist-packages/torch/distributed/launch.py", line 174, in launch
run(args)
File "/usr/local/lib/python3.8/dist-packages/torch/distributed/run.py", line 752, in run
elastic_launch(
File "/usr/local/lib/python3.8/dist-packages/torch/distributed/launcher/api.py", line 131, in __call__
return launch_agent(self._config, self._entrypoint, list(args))
File "/usr/local/lib/python3.8/dist-packages/torch/distributed/launcher/api.py", line 245, in launch_agent
raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
./tools/test.py FAILED
Failures: