Fixed. One may follow this link.
More specifically, one possible modification is the following.
In engine.py, the existing code reads:
# accumulate predictions from all images
if coco_evaluator is not None:
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
Add the per-class AP lines after coco_evaluator.summarize(); the modified code then looks like this:
# accumulate predictions from all images
if coco_evaluator is not None:
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    classwise = True
    if classwise:
        # Compute per-category AP
        # from https://github.com/facebookresearch/detectron2/blob/03064eb5bafe4a3e5750cc7a16672daf5afe8435/detectron2/evaluation/coco_evaluation.py#L259-L283 # noqa
        cocoEval = coco_evaluator.coco_eval['bbox']
        coco = coco_evaluator.coco_eval['bbox'].cocoDt
        precisions = cocoEval.eval['precision']
        catIds = coco.getCatIds()
        # precision has dims (iou, recall, cls, area range, max dets)
        assert len(catIds) == precisions.shape[2]
        results_per_category = []
        for idx, catId in enumerate(catIds):
            # area range index 0: all area ranges
            # max dets index -1: typically 100 per image
            nm = coco.loadCats(catId)[0]
            precision = precisions[:, :, idx, 0, -1]
            precision = precision[precision > -1]
            ap = np.mean(precision) if precision.size else float('nan')
            results_per_category.append(
                ('{}'.format(nm['name']),
                 '{:0.3f}'.format(float(ap * 100))))
        N_COLS = min(6, len(results_per_category) * 2)
        results_flatten = list(itertools.chain(*results_per_category))
        headers = ['category', 'AP'] * (N_COLS // 2)
        results_2d = itertools.zip_longest(
            *[results_flatten[i::N_COLS] for i in range(N_COLS)])
        table_data = [headers]
        table_data += [result for result in results_2d]
        table = AsciiTable(table_data)
        print(table.table)
Note that this snippet also needs import itertools, import numpy as np, and from terminaltables import AsciiTable at the top of engine.py (AsciiTable comes from the terminaltables package).
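For reference, here is a minimal, self-contained sketch of the same computation using only pycocotools, independent of engine.py. The names per_class_ap, coco_gt and coco_dt are placeholders for this example (a COCO ground-truth object and the detections loaded via coco_gt.loadRes(...)); the indexing follows the COCOeval convention that eval['precision'] has shape (iou thresholds, recall thresholds, categories, area ranges, max dets).

import numpy as np
from pycocotools.cocoeval import COCOeval

def per_class_ap(coco_gt, coco_dt, iou_type='bbox'):
    # Run the standard COCO evaluation once.
    coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
    coco_eval.evaluate()
    coco_eval.accumulate()
    # precision has dims (iou, recall, cls, area range, max dets)
    precisions = coco_eval.eval['precision']
    results = {}
    for idx, cat_id in enumerate(coco_gt.getCatIds()):
        name = coco_gt.loadCats(cat_id)[0]['name']
        # area range index 0: all areas; max dets index -1: largest setting (100 by default)
        p = precisions[:, :, idx, 0, -1]
        p = p[p > -1]  # -1 marks entries with no valid data
        results[name] = float(np.mean(p) * 100) if p.size else float('nan')
    return results

This returns the same per-class numbers as results_per_category above, just as a dictionary rather than a printed table.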
Does anybody know how to extract the mAP for each class during evaluation? Thanks.