Running the base code below produces unexpected results and poor evaluation performance.
Full runnable code or full changes you made:
File below: test_lazy.py
The registered coco datasets are simply the COCO datasets from https://cocodataset.org/#home but re-registered to be in a different directory structure than the expected coco/ structure on https://detectron2.readthedocs.io/en/latest/tutorials/builtin_datasets.html
<# import some common libraries
import os
import logging
logger = logging.getLogger("detectron2")
# import some common detectron2 utilities
from detectron2.data.datasets import register_coco_instances
from detectron2.utils import comm
from detectron2.utils.logger import setup_logger
from detectron2.config import LazyConfig, instantiate
from detectron2.engine import DefaultTrainer, AMPTrainer, default_writers, hooks, default_setup
from detectron2.engine.defaults import create_ddp_model
from detectron2.evaluation import inference_on_dataset, print_csv_format
from detectron2.engine import launch, default_argument_parser
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.model_zoo import get_config
def do_test(cfg, model):
    """Evaluate ``model`` on the configured test dataloader, if any.

    Args:
        cfg: a LazyConfig root; ``cfg.dataloader`` may contain an
            ``evaluator`` entry alongside ``test``.
        model: the model to run inference with.

    Returns:
        The results dict from ``inference_on_dataset`` when
        ``cfg.dataloader.evaluator`` is configured; otherwise ``None``
        (implicitly, when no evaluator is present).
    """
    if "evaluator" in cfg.dataloader:
        # Instantiate the lazy dataloader/evaluator nodes only when needed.
        ret = inference_on_dataset(
            model,
            instantiate(cfg.dataloader.test),
            instantiate(cfg.dataloader.evaluator),
        )
        print_csv_format(ret)
        return ret
if name == 'main':
args = default_argument_parser().parse_args()
# When running multi-gpu training, must be called through launch.py
launch(main, num_gpus_per_machine=1, num_machines=1,
dist_url=args.dist_url,
machine_rank=0,
args=(args,))>
2. What exact command you run:
python test_lazy.py
3. __Full logs__ or other relevant observations:
<[05/04 10:07:24 fvcore.common.checkpoint]: [Checkpointer] Loading from https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x/137260431/model_final_a54504.pkl ...
[05/04 10:07:25 fvcore.common.checkpoint]: Reading a file from 'Detectron2 Model Zoo'
WARNING [05/04 10:07:25 fvcore.common.checkpoint]: The checkpoint state_dict contains keys that are not used by the model:
proposal_generator.anchor_generator.cell_anchors.{0, 1, 2, 3, 4}
[05/04 10:07:25 d2.data.datasets.coco]: Loaded 5000 images in COCO format from coco2017/annotations/instances_val2017.json
[05/04 10:07:25 d2.data.datasets.coco]: Loaded 5000 images in COCO format from coco2017/annotations/instances_val2017.json
[05/04 10:07:26 d2.data.build]: Distribution of instances among all 80 categories:
category
#instances
category
#instances
category
#instances
person
10777
bicycle
314
car
1918
motorcycle
367
airplane
143
bus
283
train
190
truck
414
boat
424
traffic light
634
fire hydrant
101
stop sign
75
parking meter
60
bench
411
bird
427
cat
202
dog
218
horse
272
sheep
354
cow
372
elephant
252
bear
71
zebra
266
giraffe
232
backpack
371
umbrella
407
handbag
540
tie
252
suitcase
299
frisbee
115
skis
241
snowboard
69
sports ball
260
kite
327
baseball bat
145
baseball gl..
148
skateboard
179
surfboard
267
tennis racket
225
bottle
1013
wine glass
341
cup
895
fork
215
knife
325
spoon
253
bowl
623
banana
370
apple
236
sandwich
177
orange
285
broccoli
312
carrot
365
hot dog
125
pizza
284
donut
328
cake
310
chair
1771
couch
261
potted plant
342
bed
163
dining table
695
toilet
179
tv
288
laptop
231
mouse
106
remote
283
keyboard
153
cell phone
262
microwave
55
oven
143
toaster
9
sink
225
refrigerator
126
book
1129
clock
267
vase
274
scissors
36
teddy bear
190
hair drier
11
toothbrush
57
total
36335
[05/04 10:07:26 d2.data.build]: Distribution of instances among all 80 categories:
Instructions To Reproduce the Issue:
Running the base code below is producing the unexpected results and poor evaluation performance.
<# import some common libraries import os import logging logger = logging.getLogger("detectron2")
# import some common detectron2 utilities
from detectron2.data.datasets import register_coco_instances from detectron2.utils import comm from detectron2.utils.logger import setup_logger from detectron2.config import LazyConfig, instantiate from detectron2.engine import DefaultTrainer, AMPTrainer, default_writers, hooks, default_setup from detectron2.engine.defaults import create_ddp_model from detectron2.evaluation import inference_on_dataset, print_csv_format from detectron2.engine import launch, default_argument_parser from detectron2.checkpoint import DetectionCheckpointer from detectron2.model_zoo import get_config
def main(args): logger = setup_logger()
Handle COCO datasets
def do_test(cfg, model): if "evaluator" in cfg.dataloader: ret = inference_on_dataset( model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator) ) print_csv_format(ret) return ret
if __name__ == '__main__': args = default_argument_parser().parse_args()
[05/04 10:13:50 d2.evaluation.testing]: copypaste: Task: bbox [05/04 10:13:50 d2.evaluation.testing]: copypaste: Task: bbox [05/04 10:13:50 d2.evaluation.testing]: copypaste: AP,AP50,AP75,APs,APm,APl [05/04 10:13:50 d2.evaluation.testing]: copypaste: AP,AP50,AP75,APs,APm,APl [05/04 10:13:50 d2.evaluation.testing]: copypaste: 0.0412,0.0772,0.0380,0.0696,0.0545,0.0550 [05/04 10:13:50 d2.evaluation.testing]: copypaste: 0.0412,0.0772,0.0380,0.0696,0.0545,0.0550 [05/04 10:13:50 d2.evaluation.testing]: copypaste: Task: segm [05/04 10:13:50 d2.evaluation.testing]: copypaste: Task: segm [05/04 10:13:50 d2.evaluation.testing]: copypaste: AP,AP50,AP75,APs,APm,APl [05/04 10:13:50 d2.evaluation.testing]: copypaste: AP,AP50,AP75,APs,APm,APl [05/04 10:13:50 d2.evaluation.testing]: copypaste: 0.0374,0.0720,0.0347,0.0418,0.0415,0.0708 [05/04 10:13:50 d2.evaluation.testing]: copypaste: 0.0374,0.0720,0.0347,0.0418,0.0415,0.0708 OrderedDict([('bbox', {'AP': 0.041204753759922115, 'AP50': 0.0771635532890811, 'AP75': 0.0379770943305908, 'APs': 0.06958980737201922, 'APm': 0.0544606967609001, 'APl': 0.05498009465260034}), ('segm', {'AP': 0.037437911241689624, 'AP50': 0.07199709558586864, 'AP75': 0.03473483454833331, 'APs': 0.041843848702615545, 'APm': 0.041454509260223496, 'APl': 0.07075589063798658})])>