facebookresearch / detectron2

Detectron2 is a platform for object detection, segmentation and other visual recognition tasks.
https://detectron2.readthedocs.io/en/latest/
Apache License 2.0

DeepLab Inference seems to not be working / Unable to display predictions #2707

Open michelewang opened 3 years ago

michelewang commented 3 years ago

Hi, thank you Yuxin and team for making this amazing resource and for all of your support answering questions!!!

I'm running into an issue where I think I am either not running inference properly, or I'm just having difficulty displaying my predictions, because I keep getting an error when trying to run `visualize_data.py`.

Instructions To Reproduce the 🐛 Bug:

  1. Full runnable code or full changes you made: this is my file `train_net_xbd.py` (imports removed):
    
def build_sem_seg_train_aug(cfg):
    augs = [
        T.ResizeShortestEdge(
            cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
        )
    ]
    if cfg.INPUT.CROP.ENABLED:
        augs.append(
            T.RandomCrop_CategoryAreaConstraint(
                cfg.INPUT.CROP.TYPE,
                cfg.INPUT.CROP.SIZE,
                cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,
                cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
            )
        )
    augs.append(T.RandomFlip())
    return augs

class Trainer(DefaultTrainer):
    """
    We use the "DefaultTrainer" which contains a number of pre-defined behaviors
    for a standard training workflow. They may not work for you, especially if
    you are working on a new research project. In that case you can use the
    cleaner "SimpleTrainer", or write your own training loop.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type == "sem_seg":
            print("datasetname", dataset_name)
            return SemSegEvaluator(
                dataset_name,
                distributed=True,
                num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                output_dir=output_folder,
            )
        return DatasetEvaluators(evaluator_list)

    @classmethod
    def build_train_loader(cls, cfg):
        if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE:
            mapper = DatasetMapper(cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg))
        else:
            mapper = None
        return build_detection_train_loader(cfg, mapper=mapper)

    @classmethod
    def build_lr_scheduler(cls, cfg, optimizer):
        """
        It now calls :func:`detectron2.solver.build_lr_scheduler`.
        Overwrite it if you'd like a different scheduler.
        """
        return build_lr_scheduler(cfg, optimizer)

def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    add_deeplab_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg

def get_building_dicts(img_dir):
    """This function loads the JSON file created with the annotator and
    converts it to the detectron2 metadata specifications.
    """
    img_links = glob.glob(img_dir + "labels/*.json")

    # only keep the images that include "post"
    img_anns = list(filter(lambda x: "post" in x, img_links))

    dataset_dicts = []
    # loop through the entries in the JSON file
    for idx, single in enumerate(img_anns):
        v = json.load(open(single))
        record = {}
        # add file_name, image_id, height and width information to the records
        filename = os.path.join(img_dir, "images/", v["metadata"]["img_name"])
        height, width = (v["metadata"]["height"], v["metadata"]["width"])

        record["file_name"] = filename
        record["image_id"] = idx
        record["height"] = height
        record["width"] = width
        record["sem_seg_file_name"] = img_dir + "bin_masks/" + v["metadata"]["img_name"]
        dataset_dicts.append(record)

    return dataset_dicts

def main(args):
    for d in ["train", "test"]:
        DatasetCatalog.register(
            "xbddata_" + d, lambda d=d: get_building_dicts("/n/tambe_lab/Users/michelewang/" + d + "/"),
        )
        MetadataCatalog.get("xbddata_" + d).stuff_classes = ["0", "1", "2"]
        MetadataCatalog.get("xbddata_" + d).evaluator_type = "sem_seg"

    print("Dataset Catalog", DatasetCatalog.list())
    print("XBDDATA_TRAIN", DatasetCatalog.get("xbddata_train"))
    xbdtrain_metadata = MetadataCatalog.get("xbddata_train")
    xbdtest_metadata = MetadataCatalog.get("xbddata_test")

    cfg = setup(args)

    if args.eval_only:
        print("hi, we're in eval only")
        model = Trainer.build_model(cfg)
        print("cfg.MODEL.WEIGHTS", cfg.MODEL.WEIGHTS)
        print("cfg.OUTPUT_DIR", cfg.OUTPUT_DIR)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        res = Trainer.test(cfg, model)
        return res

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()

if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )

This is my file `visualize_data.py`:

#!/usr/bin/env python

# Copyright (c) Facebook, Inc. and its affiliates.

import argparse
import os
from itertools import chain
import cv2
import tqdm

from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_train_loader
from detectron2.data import detection_utils as utils
from detectron2.data.build import filter_images_with_few_keypoints
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer

def setup(args):
    cfg = get_cfg()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.freeze()
    return cfg

def parse_args(in_args=None):
    parser = argparse.ArgumentParser(description="Visualize ground-truth data")
    parser.add_argument(
        "--source",
        choices=["annotation", "dataloader"],
        required=True,
        help="visualize the annotations or the data loader (with pre-processing)",
    )
    parser.add_argument("--config-file", metavar="FILE", help="path to config file")
    parser.add_argument("--output-dir", default="./", help="path to output directory")
    parser.add_argument("--show", action="store_true", help="show output in a window")
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser.parse_args(in_args)

if __name__ == "__main__":
    args = parse_args()
    logger = setup_logger()
    logger.info("Arguments: " + str(args))
    cfg = setup(args)

    dirname = args.output_dir
    os.makedirs(dirname, exist_ok=True)
    metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])

    def output(vis, fname):
        if args.show:
            print(fname)
            cv2.imshow("window", vis.get_image()[:, :, ::-1])
            cv2.waitKey()
        else:
            filepath = os.path.join(dirname, fname)
            print("Saving to {} ...".format(filepath))
            vis.save(filepath)

    scale = 1.0
    if args.source == "dataloader":
        train_data_loader = build_detection_train_loader(cfg)
        for batch in train_data_loader:
            for per_image in batch:
                # Pytorch tensor is in (C, H, W) format
                img = per_image["image"].permute(1, 2, 0).cpu().detach().numpy()
                img = utils.convert_image_to_rgb(img, cfg.INPUT.FORMAT)

                visualizer = Visualizer(img, metadata=metadata, scale=scale)
                target_fields = per_image["instances"].get_fields()
                labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]]
                vis = visualizer.overlay_instances(
                    labels=labels,
                    boxes=target_fields.get("gt_boxes", None),
                    masks=target_fields.get("gt_masks", None),
                    keypoints=target_fields.get("gt_keypoints", None),
                )
                output(vis, str(per_image["image_id"]) + ".jpg")
    else:
        dicts = list(chain.from_iterable([DatasetCatalog.get(k) for k in cfg.DATASETS.TRAIN]))
        if cfg.MODEL.KEYPOINT_ON:
            dicts = filter_images_with_few_keypoints(dicts, 1)
        for dic in tqdm.tqdm(dicts):
            img = utils.read_image(dic["file_name"], "RGB")
            visualizer = Visualizer(img, metadata=metadata, scale=scale)
            vis = visualizer.draw_dataset_dict(dic)
            output(vis, os.path.basename(dic["file_name"]))
2. What exact command you run:
This was the command I ran for inference (it finished in <1 min, so I think something is wrong here):

cd /n/home07/michelewang/thesis/detectron2/projects/DeepLab
python train_net_xbd.py --config-file configs/xBD-configs/base-deeplabv3.yaml --eval-only MODEL.WEIGHTS ./output/model_0024999.pth


This was the command I ran to try to view my predictions:

cd /n/home07/michelewang/thesis/detectron2/tools
python visualize_data.py --source annotation --config-file ../projects/DeepLab/configs/xBD-configs/base-deeplabv3.yaml --output-dir ../projects/DeepLab/output/inference --show

3. __Full logs__ or other relevant observations:
Logs from Inference:

WARNING [03/07 21:38:53 d2.evaluation.sem_seg_evaluation]: SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata.
WARNING [03/07 21:38:53 d2.evaluation.sem_seg_evaluation]: SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata.
[03/07 21:38:54 d2.evaluation.evaluator]: Start inference on 933 images
[03/07 21:38:56 d2.evaluation.evaluator]: Inference done 11/933. 0.0817 s / img. ETA=0:01:55
[03/07 21:39:01 d2.evaluation.evaluator]: Inference done 51/933. 0.0817 s / img. ETA=0:01:50
[03/07 21:39:06 d2.evaluation.evaluator]: Inference done 70/933. 0.0817 s / img. ETA=0:02:26
[03/07 21:39:11 d2.evaluation.evaluator]: Inference done 110/933. 0.0818 s / img. ETA=0:02:06
[03/07 21:39:17 d2.evaluation.evaluator]: Inference done 150/933. 0.0818 s / img. ETA=0:01:54
[03/07 21:39:22 d2.evaluation.evaluator]: Inference done 190/933. 0.0819 s / img. ETA=0:01:45
[03/07 21:39:27 d2.evaluation.evaluator]: Inference done 229/933. 0.0820 s / img. ETA=0:01:38
[03/07 21:39:32 d2.evaluation.evaluator]: Inference done 270/933. 0.0820 s / img. ETA=0:01:30
[03/07 21:39:37 d2.evaluation.evaluator]: Inference done 310/933. 0.0819 s / img. ETA=0:01:24
[03/07 21:39:42 d2.evaluation.evaluator]: Inference done 351/933. 0.0819 s / img. ETA=0:01:18
[03/07 21:39:47 d2.evaluation.evaluator]: Inference done 391/933. 0.0819 s / img. ETA=0:01:12
[03/07 21:39:52 d2.evaluation.evaluator]: Inference done 432/933. 0.0819 s / img. ETA=0:01:06
[03/07 21:39:57 d2.evaluation.evaluator]: Inference done 473/933. 0.0819 s / img. ETA=0:01:00
[03/07 21:40:02 d2.evaluation.evaluator]: Inference done 514/933. 0.0819 s / img. ETA=0:00:54
[03/07 21:40:07 d2.evaluation.evaluator]: Inference done 554/933. 0.0818 s / img. ETA=0:00:49
[03/07 21:40:12 d2.evaluation.evaluator]: Inference done 595/933. 0.0818 s / img. ETA=0:00:44
[03/07 21:40:17 d2.evaluation.evaluator]: Inference done 636/933. 0.0818 s / img. ETA=0:00:38
[03/07 21:40:23 d2.evaluation.evaluator]: Inference done 677/933. 0.0818 s / img. ETA=0:00:33
[03/07 21:40:28 d2.evaluation.evaluator]: Inference done 717/933. 0.0818 s / img. ETA=0:00:27
[03/07 21:40:33 d2.evaluation.evaluator]: Inference done 758/933. 0.0818 s / img. ETA=0:00:22
[03/07 21:40:38 d2.evaluation.evaluator]: Inference done 799/933. 0.0818 s / img. ETA=0:00:17
[03/07 21:40:43 d2.evaluation.evaluator]: Inference done 840/933. 0.0818 s / img. ETA=0:00:11
[03/07 21:40:48 d2.evaluation.evaluator]: Inference done 881/933. 0.0818 s / img. ETA=0:00:06
[03/07 21:40:53 d2.evaluation.evaluator]: Inference done 922/933. 0.0818 s / img. ETA=0:00:01
[03/07 21:40:54 d2.evaluation.evaluator]: Total inference time: 0:01:59.094921 (0.128335 s / img per device, on 1 devices)
[03/07 21:40:54 d2.evaluation.evaluator]: Total inference pure compute time: 0:01:15 (0.081782 s / img per device, on 1 devices)
[03/07 21:40:56 d2.evaluation.sem_seg_evaluation]: OrderedDict([('sem_seg', {'mIoU': 14.286231780375028, 'fwIoU': 30.412502468859948, 'IoU-0': 31.958253593820217, 'IoU-1': 5.555678249276511, 'IoU-2': 5.344763498028353, 'mACC': 53.518182755044954, 'pACC': 34.17634953767668, 'ACC-0': 32.13264257356594, 'ACC-1': 69.63928432518483, 'ACC-2': 58.78262136638409})])
[03/07 21:40:56 d2.engine.defaults]: Evaluation results for xbddata_test in csv format:
[03/07 21:40:56 d2.evaluation.testing]: copypaste: Task: sem_seg
[03/07 21:40:56 d2.evaluation.testing]: copypaste: mIoU,fwIoU,mACC,pACC
[03/07 21:40:56 d2.evaluation.testing]: copypaste: 14.2862,30.4125,53.5182,34.1763


**The Semantic Segmentation JSON created in my Outputs folder, `sem_seg_predictions.json`**  -- is this normal??
![image](https://user-images.githubusercontent.com/10359314/110288187-9de87f00-7fb5-11eb-8895-068c860c7447.png)

**The Error I got from my second script to visualize_data.py:**

Traceback (most recent call last):
  File "visualize_data.py", line 51, in <module>
    cfg = setup(args)
  File "visualize_data.py", line 20, in setup
    cfg.merge_from_file(args.config_file)
  File "/n/home07/michelewang/.conda/envs/active/lib/python3.8/site-packages/detectron2/config/config.py", line 54, in merge_from_file
    self.merge_from_other_cfg(loaded_cfg)
  File "/n/home07/michelewang/.conda/envs/active/lib/python3.8/site-packages/fvcore/common/config.py", line 123, in merge_from_other_cfg
    return super().merge_from_other_cfg(cfg_other)
  File "/n/home07/michelewang/.conda/envs/active/lib/python3.8/site-packages/yacs/config.py", line 217, in merge_from_other_cfg
    _merge_a_into_b(cfg_other, self, self, [])
  File "/n/home07/michelewang/.conda/envs/active/lib/python3.8/site-packages/yacs/config.py", line 478, in _merge_a_into_b
    _merge_a_into_b(v, b[k], root, key_list + [k])
  File "/n/home07/michelewang/.conda/envs/active/lib/python3.8/site-packages/yacs/config.py", line 478, in _merge_a_into_b
    _merge_a_into_b(v, b[k], root, key_list + [k])
  File "/n/home07/michelewang/.conda/envs/active/lib/python3.8/site-packages/yacs/config.py", line 491, in _merge_a_into_b
    raise KeyError("Non-existent config key: {}".format(full_key))
KeyError: 'Non-existent config key: MODEL.RESNETS.RES5_MULTI_GRID'



## Expected behavior:
I first ran the inference script, which finished in under a minute. Then I ran the script to visualize my predictions, because I wanted to see how accurate DeepLab's semantic segmentation predictions were for my model. **However, I was blocked by config errors for the model, and I don't think I ran inference correctly given how fast it finished; the JSON file of predictions looks like gibberish, and I'm not sure how to do it correctly.**

## Environment:

Provide your environment information using the following command:

wget -nc -q https://github.com/facebookresearch/detectron2/raw/master/detectron2/utils/collect_env.py && python collect_env.py



**Full Error Logs for Inference** (the stuff at the top is just the script reprinting all the data in my original train dataset):

{'file_name': '/n/tambe_lab/Users/michelewang/train/images/midwest-flooding_00000173_post_disaster.png', 'image_id': 2715, 'height': 1024, 'width': 1024, 'sem_seg_file_name': '/n/tambe_lab/Users/michelewang/train/bin_masks/midwest-flooding_00000173_post_disaster.png'},
{'file_name': '/n/tambe_lab/Users/michelewang/train/images/midwest-flooding_00000293_post_disaster.png', 'image_id': 2716, 'height': 1024, 'width': 1024, 'sem_seg_file_n
[03/07 21:38:45 detectron2]: Rank of current process: 0. World size: 1
[03/07 21:38:48 detectron2]: Environment info:


sys.platform            linux
Python                  3.8.5 (default, Sep 4 2020, 07:30:14) [GCC 7.3.0]
numpy                   1.19.2
detectron2              0.3 @/n/home07/michelewang/.conda/envs/active/lib/python3.8/site-packages/detectron2
Compiler                GCC 9.2
CUDA compiler           not available
detectron2 arch flags   /n/home07/michelewang/.conda/envs/active/lib/python3.8/site-packages/detectron2/_C.cpython-38-x86_64-linux-gnu.so
DETECTRON2_ENV_MODULE   <not set>
PyTorch                 1.7.1 @/n/home07/michelewang/.conda/envs/active/lib/python3.8/site-packages/torch
PyTorch debug build     False
GPU available           True
GPU 0,1,2,3,4,5,6,7     Tesla V100-PCIE-32GB (arch=7.0)
CUDA_HOME               /n/helmod/apps/centos7/Core/cuda/10.2.89-fasrc01/cuda
Pillow                  8.1.0
torchvision             0.8.2 @/n/home07/michelewang/.conda/envs/active/lib/python3.8/site-packages/torchvision
torchvision arch flags  3.5, 5.0, 6.0, 7.0, 7.5
fvcore                  0.1.3.post20210220
cv2                     4.4.0


PyTorch built with:

[03/07 21:38:48 detectron2]: Command line arguments: Namespace(config_file='configs/xBD-configs/base-deeplabv3.yaml', dist_url='tcp://127.0.0.1:62862', eval_only=True, machine_rank=0, num_gpus=1, num_machines=1, opts=['MODEL.WEIGHTS', './output/model_0024999.pth'], resume=False)
[03/07 21:38:48 detectron2]: Contents of args.config_file=configs/xBD-configs/base-deeplabv3.yaml:
_BASE_: base.yaml
MODEL:
  WEIGHTS: "detectron2://DeepLab/R-103.pkl"
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.120, 57.375]
  BACKBONE:
    NAME: "build_resnet_deeplab_backbone"
  RESNETS:
    DEPTH: 101
    NORM: "SyncBN"
    OUT_FEATURES: ["res2", "res5"]
    RES5_MULTI_GRID: [1, 2, 4]
    STEM_TYPE: "deeplab"
    STEM_OUT_CHANNELS: 128
    STRIDE_IN_1X1: False
  SEM_SEG_HEAD:
    NAME: "DeepLabV3PlusHead"
    IN_FEATURES: ["res2", "res5"]
    PROJECT_FEATURES: ["res2"]
    PROJECT_CHANNELS: [48]
    NORM: "SyncBN"
    COMMON_STRIDE: 4
INPUT:
  FORMAT: "RGB"

[03/07 21:38:48 detectron2]: Running with full config:
CUDNN_BENCHMARK: False
DATALOADER:
  ASPECT_RATIO_GROUPING: True
  FILTER_EMPTY_ANNOTATIONS: True
  NUM_WORKERS: 10
  REPEAT_THRESHOLD: 0.0
  SAMPLER_TRAIN: TrainingSampler
DATASETS:
  PRECOMPUTED_PROPOSAL_TOPK_TEST: 1000
  PRECOMPUTED_PROPOSAL_TOPK_TRAIN: 2000
  PROPOSAL_FILES_TEST: ()
  PROPOSAL_FILES_TRAIN: ()
  TEST: ('xbddata_test',)
  TRAIN: ('xbddata_train',)
GLOBAL:
  HACK: 1.0
INPUT:
  CROP:
    ENABLED: True
    SINGLE_CATEGORY_MAX_AREA: 1.0
    SIZE: [512, 1024]
    TYPE: absolute
  FORMAT: RGB
  MASK_FORMAT: polygon
  MAX_SIZE_TEST: 1024
  MAX_SIZE_TRAIN: 1024
  MIN_SIZE_TEST: 1024
  MIN_SIZE_TRAIN: (1024,)
  MIN_SIZE_TRAIN_SAMPLING: choice
  RANDOM_FLIP: horizontal
MODEL:
  ANCHOR_GENERATOR:
    ANGLES: [[-90, 0, 90]]
    ASPECT_RATIOS: [[0.5, 1.0, 2.0]]
    NAME: DefaultAnchorGenerator
    OFFSET: 0.0
    SIZES: [[32, 64, 128, 256, 512]]
  BACKBONE:
    FREEZE_AT: 0
    NAME: build_resnet_deeplab_backbone
  DEVICE: cuda
  FPN:
    FUSE_TYPE: sum
    IN_FEATURES: []
    NORM:
    OUT_CHANNELS: 256
  KEYPOINT_ON: False
  LOAD_PROPOSALS: False
  MASK_ON: False
  META_ARCHITECTURE: SemanticSegmentor
  PANOPTIC_FPN:
    COMBINE:
      ENABLED: True
      INSTANCES_CONFIDENCE_THRESH: 0.5
      OVERLAP_THRESH: 0.5
      STUFF_AREA_LIMIT: 4096
    INSTANCE_LOSS_WEIGHT: 1.0
  PIXEL_MEAN: [123.675, 116.28, 103.53]
  PIXEL_STD: [58.395, 57.12, 57.375]
  PROPOSAL_GENERATOR:
    MIN_SIZE: 0
    NAME: RPN
  RESNETS:
    DEFORM_MODULATED: False
    DEFORM_NUM_GROUPS: 1
    DEFORM_ON_PER_STAGE: [False, False, False, False]
    DEPTH: 101
    NORM: SyncBN
    NUM_GROUPS: 1
    OUT_FEATURES: ['res2', 'res5']
    RES2_OUT_CHANNELS: 256
    RES4_DILATION: 1
    RES5_DILATION: 2
    RES5_MULTI_GRID: [1, 2, 4]
    STEM_OUT_CHANNELS: 128
    STEM_TYPE: deeplab
    STRIDE_IN_1X1: False
    WIDTH_PER_GROUP: 64
  RETINANET:
    BBOX_REG_LOSS_TYPE: smooth_l1
    BBOX_REG_WEIGHTS: (1.0, 1.0, 1.0, 1.0)
    FOCAL_LOSS_ALPHA: 0.25
    FOCAL_LOSS_GAMMA: 2.0
    IN_FEATURES: ['p3', 'p4', 'p5', 'p6', 'p7']
    IOU_LABELS: [0, -1, 1]
    IOU_THRESHOLDS: [0.4, 0.5]
    NMS_THRESH_TEST: 0.5
    NORM:
    NUM_CLASSES: 80
    NUM_CONVS: 4
    PRIOR_PROB: 0.01
    SCORE_THRESH_TEST: 0.05
    SMOOTH_L1_LOSS_BETA: 0.1
    TOPK_CANDIDATES_TEST: 1000
  ROI_BOX_CASCADE_HEAD:
    BBOX_REG_WEIGHTS: ((10.0, 10.0, 5.0, 5.0), (20.0, 20.0, 10.0, 10.0), (30.0, 30.0, 15.0, 15.0))
    IOUS: (0.5, 0.6, 0.7)
  ROI_BOX_HEAD:
    BBOX_REG_LOSS_TYPE: smooth_l1
    BBOX_REG_LOSS_WEIGHT: 1.0
    BBOX_REG_WEIGHTS: (10.0, 10.0, 5.0, 5.0)
    CLS_AGNOSTIC_BBOX_REG: False
    CONV_DIM: 256
    FC_DIM: 1024
    NAME: FastRCNNConvFCHead
    NORM:
    NUM_CONV: 0
    NUM_FC: 2
    POOLER_RESOLUTION: 7
    POOLER_SAMPLING_RATIO: 0
    POOLER_TYPE: ROIAlignV2
    SMOOTH_L1_BETA: 0.0
    TRAIN_ON_PRED_BOXES: False
  ROI_HEADS:
    BATCH_SIZE_PER_IMAGE: 512
    IN_FEATURES: ['res5']
    IOU_LABELS: [0, 1]
    IOU_THRESHOLDS: [0.5]
    NAME: StandardROIHeads
    NMS_THRESH_TEST: 0.5
    NUM_CLASSES: 80
    POSITIVE_FRACTION: 0.25
    PROPOSAL_APPEND_GT: True
    SCORE_THRESH_TEST: 0.05
  ROI_KEYPOINT_HEAD:
    CONV_DIMS: (512, 512, 512, 512, 512, 512, 512, 512)
    LOSS_WEIGHT: 1.0
    MIN_KEYPOINTS_PER_IMAGE: 1
    NAME: KRCNNConvDeconvUpsampleHead
    NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS: True
    NUM_KEYPOINTS: 17
    POOLER_RESOLUTION: 14
    POOLER_SAMPLING_RATIO: 0
    POOLER_TYPE: ROIAlignV2
  ROI_MASK_HEAD:
    CLS_AGNOSTIC_MASK: False
    CONV_DIM: 256
    NAME: MaskRCNNConvUpsampleHead
    NORM:
    NUM_CONV: 4
    POOLER_RESOLUTION: 14
    POOLER_SAMPLING_RATIO: 0
    POOLER_TYPE: ROIAlignV2
  RPN:
    BATCH_SIZE_PER_IMAGE: 256
    BBOX_REG_LOSS_TYPE: smooth_l1
    BBOX_REG_LOSS_WEIGHT: 1.0
    BBOX_REG_WEIGHTS: (1.0, 1.0, 1.0, 1.0)
    BOUNDARY_THRESH: -1
    HEAD_NAME: StandardRPNHead
    IN_FEATURES: ['res5']
    IOU_LABELS: [0, -1, 1]
    IOU_THRESHOLDS: [0.3, 0.7]
    LOSS_WEIGHT: 1.0
    NMS_THRESH: 0.7
    POSITIVE_FRACTION: 0.5
    POST_NMS_TOPK_TEST: 1000
    POST_NMS_TOPK_TRAIN: 2000
    PRE_NMS_TOPK_TEST: 6000
    PRE_NMS_TOPK_TRAIN: 12000
    SMOOTH_L1_BETA: 0.0
  SEM_SEG_HEAD:
    ASPP_CHANNELS: 256
    ASPP_DILATIONS: [6, 12, 18]
    ASPP_DROPOUT: 0.1
    COMMON_STRIDE: 4
    CONVS_DIM: 256
    IGNORE_VALUE: 255
    IN_FEATURES: ['res2', 'res5']
    LOSS_TYPE: hard_pixel_mining
    LOSS_WEIGHT: 1.0
    NAME: DeepLabV3PlusHead
    NORM: SyncBN
    NUM_CLASSES: 3
    PROJECT_CHANNELS: [48]
    PROJECT_FEATURES: ['res2']
    USE_DEPTHWISE_SEPARABLE_CONV: False
  WEIGHTS: ./output/model_0024999.pth
OUTPUT_DIR: ./output
SEED: -1
SOLVER:
  AMP:
    ENABLED: False
  BASE_LR: 0.01
  BIAS_LR_FACTOR: 1.0
  CHECKPOINT_PERIOD: 5000
  CLIP_GRADIENTS:
    CLIP_TYPE: value
    CLIP_VALUE: 1.0
    ENABLED: False
    NORM_TYPE: 2.0
  GAMMA: 0.1
  IMS_PER_BATCH: 16
  LR_SCHEDULER_NAME: WarmupPolyLR
  MAX_ITER: 90000
  MOMENTUM: 0.9
  NESTEROV: False
  POLY_LR_CONSTANT_ENDING: 0.0
  POLY_LR_POWER: 0.9
  REFERENCE_WORLD_SIZE: 0
  STEPS: (60000, 80000)
  WARMUP_FACTOR: 0.001
  WARMUP_ITERS: 1000
  WARMUP_METHOD: linear
  WEIGHT_DECAY: 0.0001
  WEIGHT_DECAY_BIAS: 0.0001
  WEIGHT_DECAY_NORM: 0.0
TEST:
  AUG:
    ENABLED: False
    FLIP: True
    MAX_SIZE: 4000
    MIN_SIZES: (400, 500, 600, 700, 800, 900, 1000, 1100, 1200)
  DETECTIONS_PER_IMAGE: 100
  EVAL_PERIOD: 0
  EXPECTED_RESULTS: []
  KEYPOINT_OKS_SIGMAS: []
  PRECISE_BN:
    ENABLED: False
    NUM_ITER: 200
VERSION: 2
VIS_PERIOD: 0
[03/07 21:38:48 detectron2]: Full config saved to ./output/config.yaml
[03/07 21:38:48 d2.utils.env]: Using a generated random seed 48437111

cfg.MODEL.WEIGHTS ./output/model_0024999.pth
cfg.OUTPUT_DIR ./output
[03/07 21:38:52 fvcore.common.checkpoint]: Loading checkpoint from ./output/model_0024999.pth
WARNING [03/07 21:38:52 fvcore.common.checkpoint]: Skip loading parameter 'sem_seg_head.predictor.weight' to the model due to incompatible shapes: (19, 256, 1, 1) in the checkpoint but (3, 256, 1, 1) in the model! You might want to double check if this is expected.
WARNING [03/07 21:38:52 fvcore.common.checkpoint]: Skip loading parameter 'sem_seg_head.predictor.bias' to the model due to incompatible shapes: (19,) in the checkpoint but (3,) in the model! You might want to double check if this is expected.
[03/07 21:38:52 fvcore.common.checkpoint]: Some model parameters or buffers are not found in the checkpoint: sem_seg_head.predictor.{bias, weight}
[03/07 21:38:53 d2.data.dataset_mapper]: [DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(1024, 1024), max_size=1024, sample_style='choice')]
[03/07 21:38:53 d2.data.common]: Serializing 933 elements to byte tensors and concatenating them all ...
[03/07 21:38:53 d2.data.common]: Serialized dataset takes 0.23 MiB
datasetname xbddata_test
WARNING [03/07 21:38:53 d2.evaluation.sem_seg_evaluation]: SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata.
WARNING [03/07 21:38:53 d2.evaluation.sem_seg_evaluation]: SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata.
[03/07 21:38:54 d2.evaluation.evaluator]: Start inference on 933 images
[03/07 21:38:56 d2.evaluation.evaluator]: Inference done 11/933. 0.0817 s / img. ETA=0:01:55
[03/07 21:39:01 d2.evaluation.evaluator]: Inference done 51/933. 0.0817 s / img. ETA=0:01:50
[03/07 21:39:06 d2.evaluation.evaluator]: Inference done 70/933. 0.0817 s / img. ETA=0:02:26
[03/07 21:39:11 d2.evaluation.evaluator]: Inference done 110/933. 0.0818 s / img. ETA=0:02:06
[03/07 21:39:17 d2.evaluation.evaluator]: Inference done 150/933. 0.0818 s / img. ETA=0:01:54
[03/07 21:39:22 d2.evaluation.evaluator]: Inference done 190/933. 0.0819 s / img. ETA=0:01:45
[03/07 21:39:27 d2.evaluation.evaluator]: Inference done 229/933. 0.0820 s / img. ETA=0:01:38
[03/07 21:39:32 d2.evaluation.evaluator]: Inference done 270/933. 0.0820 s / img. ETA=0:01:30
[03/07 21:39:37 d2.evaluation.evaluator]: Inference done 310/933. 0.0819 s / img. ETA=0:01:24
[03/07 21:39:42 d2.evaluation.evaluator]: Inference done 351/933. 0.0819 s / img. ETA=0:01:18
[03/07 21:39:47 d2.evaluation.evaluator]: Inference done 391/933. 0.0819 s / img. ETA=0:01:12
[03/07 21:39:52 d2.evaluation.evaluator]: Inference done 432/933. 0.0819 s / img. ETA=0:01:06
[03/07 21:39:57 d2.evaluation.evaluator]: Inference done 473/933. 0.0819 s / img. ETA=0:01:00
[03/07 21:40:02 d2.evaluation.evaluator]: Inference done 514/933. 0.0819 s / img. ETA=0:00:54
[03/07 21:40:07 d2.evaluation.evaluator]: Inference done 554/933. 0.0818 s / img. ETA=0:00:49
[03/07 21:40:12 d2.evaluation.evaluator]: Inference done 595/933. 0.0818 s / img. ETA=0:00:44
[03/07 21:40:17 d2.evaluation.evaluator]: Inference done 636/933. 0.0818 s / img. ETA=0:00:38
[03/07 21:40:23 d2.evaluation.evaluator]: Inference done 677/933. 0.0818 s / img. ETA=0:00:33
[03/07 21:40:28 d2.evaluation.evaluator]: Inference done 717/933. 0.0818 s / img. ETA=0:00:27
[03/07 21:40:33 d2.evaluation.evaluator]: Inference done 758/933. 0.0818 s / img. ETA=0:00:22
[03/07 21:40:38 d2.evaluation.evaluator]: Inference done 799/933. 0.0818 s / img. ETA=0:00:17
[03/07 21:40:43 d2.evaluation.evaluator]: Inference done 840/933. 0.0818 s / img. ETA=0:00:11
[03/07 21:40:48 d2.evaluation.evaluator]: Inference done 881/933. 0.0818 s / img. ETA=0:00:06
[03/07 21:40:53 d2.evaluation.evaluator]: Inference done 922/933. 0.0818 s / img. ETA=0:00:01
[03/07 21:40:54 d2.evaluation.evaluator]: Total inference time: 0:01:59.094921 (0.128335 s / img per device, on 1 devices)
[03/07 21:40:54 d2.evaluation.evaluator]: Total inference pure compute time: 0:01:15 (0.081782 s / img per device, on 1 devices)
[03/07 21:40:56 d2.evaluation.sem_seg_evaluation]: OrderedDict([('sem_seg', {'mIoU': 14.286231780375028, 'fwIoU': 30.412502468859948, 'IoU-0': 31.958253593820217, 'IoU-1': 5.555678249276511, 'IoU-2': 5.344763498028353, 'mACC': 53.518182755044954, 'pACC': 34.17634953767668, 'ACC-0': 32.13264257356594, 'ACC-1': 69.63928432518483, 'ACC-2': 58.78262136638409})])
[03/07 21:40:56 d2.engine.defaults]: Evaluation results for xbddata_test in csv format:
[03/07 21:40:56 d2.evaluation.testing]: copypaste: Task: sem_seg
[03/07 21:40:56 d2.evaluation.testing]: copypaste: mIoU,fwIoU,mACC,pACC
[03/07 21:40:56 d2.evaluation.testing]: copypaste: 14.2862,30.4125,53.5182,34.1763

ppwwyyxx commented 3 years ago

Showing full logs that contain the error would help. I guess you missed `add_deeplab_config(cfg)` in the second script.
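
A minimal sketch of that fix, applied to the `setup()` from `visualize_data.py` above (the import mirrors the one `train_net_xbd.py` already uses):

from detectron2.config import get_cfg
from detectron2.projects.deeplab import add_deeplab_config

def setup(args):
    cfg = get_cfg()
    # registers the DeepLab-only keys such as MODEL.RESNETS.RES5_MULTI_GRID,
    # so merge_from_file no longer raises the KeyError shown above
    add_deeplab_config(cfg)
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.freeze()
    return cfg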

Also, visualize_data does not visualize predictions (see https://github.com/facebookresearch/detectron2/tree/master/tools/ ).

michelewang commented 3 years ago

Hi @ppwwyyxx, thanks so much for your response and for those points-- I really appreciate it!

Is there any way to visualize the predictions of DeepLab's segmentation, to see if my inference ran correctly, and to see if the model was somewhat close in its predictions?

Also, the only outputs I received were the `sem_seg_evaluation.pth` and `sem_seg_predictions.json` files in `output/inference`, but my JSON file looked like the attached photo, totally unparseable – is that normal? That was why I was wondering whether inference worked, and also whether there is code to turn that output into masks for me.

Thank you so much again for your response!!!

michelewang commented 3 years ago

I also just attached the full error log from the inference script and a different error I was getting from running visualize_data.py since I had attached the wrong error!

michelewang commented 3 years ago

@ppwwyyxx Sorry, I thought about this issue more and I think there's something wrong with my inference rather than my visualizations. Do you happen to know whether this might be an inference issue, and how to resolve it? I don't think my JSON output is supposed to be unparseable/undecipherable, so I just want to make sure.

ppwwyyxx commented 3 years ago

To visualize the prediction: once you obtain the prediction (the mask) you can draw it using other libraries, or use https://detectron2.readthedocs.io/en/latest/modules/utils.html#detectron2.utils.visualizer.Visualizer.draw_sem_seg.

The masks in the JSON file are encoded in COCO RLE format, and this is normal.
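
For reference, a minimal sketch of that `draw_sem_seg` route, assuming `predictor` is a `DefaultPredictor` built from the DeepLab config, `im` is a BGR image loaded with OpenCV, and `metadata` has `stuff_classes` set:

from detectron2.utils.visualizer import Visualizer

outputs = predictor(im)["sem_seg"]        # (C, H, W) per-class scores
labels = outputs.argmax(dim=0).to("cpu")  # (H, W) integer label map, as draw_sem_seg expects
v = Visualizer(im[:, :, ::-1], metadata=metadata)
out = v.draw_sem_seg(labels)              # out.get_image() is the drawn RGB image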

michelewang commented 3 years ago

thanks @ppwwyyxx, it's good to know the JSON file is normal! However, how does the pycocotools `mask.py` or `mask.pyx` suggested by @pieterbl86 work with the detectron2 JSON output?

When I try to decode it with `m = mask_util.decode(data[0]['segmentation']['counts'])`, where `data` is the `sem_seg_predictions.json` file output by inference, I get a `TypeError: string indices must be integers`. I index into `segmentation` and `counts` because that's the only way I can access the RLE part of the output.

My JSON was something like this:

{'file_name': '/n/tambe_lab/Users/michelewang/test/images/hurricane-matthew_00000118_post_disaster.png', 'category_id': 0, 'segmentation': {'size': [1024, 1024], 'counts': ']h09>K1:CGn02h2MVe05`WOK1:CGn02h2M`f09lUOFm04g2Mbf02TWOM`n0NiP`00Wo_O101O000O11N10hhe00WWZO2N20N^l2M^jf31WYVL0bTd33Yk[L4N1O1N2O1N3VTODjg0>UXOEhg0;WXOGig00dTOMb36gg0McXO3]g0McXO4]g0KcXO7\\g0HdXO9\\g0GcXO9ag0AaXO?`k0O1N2O1O2N2O1O0O10000000N3L4M2001O1HbPOL_o01YP1NQPO101OO1O`S50`lJ21Mbm?0^R@2O01N`RT14\\mkN2O0WP4NeoK7M23M2M3Mie=L[ZB1O100001OH1_POOao017OTmh02jRWO1N2N3N1O10O1N200O2O001N[^5MgaJ2G:M3N1O2N1001N7J001N10hFFkB9U=GlB7U=IlB6_f0O10Z^O0\\30dL1\\3MeL2ga0M2O0OSbS13j]lN2O1O1VFIjC6U<LkC2U<OlCOo:0Y\\O2i8Ln:3\\[O1c00mm0O_QO1e00lm0O_QO1d01lm00^QO0f00lm00^QO0g0Kom05ZQOObo00^PO0bo01]POOdo0NSme00USZO1dPOOhn00XQO0hn0OYQO0\\o00O1M4N1O1RNITTO7kk0IUTO7kk0IVTO6jk0JVTO6km0O1O1NVn`1MnQ_N00001kT4OUkK2N5ZPOKWo0`0O0O001OO1O2M2N3O2N1L4O2Mekh00\\TWO2eSO1QNLlh06RYONQNOih05VYOLQNN_h0a0aYO@Rh0b0e31001O1O1O1O1N2O]VOBec07_VOKj53fc0OdVOKg57mi01`POHUo09gPOJYo0>00O2M2O100000_OkPO7Vo0EmPO:^o0OO1O26I000O1N1O^XO0dg001OPP3OnoL4N1000O001OfGNi@0TX21__N00OUb5OQaI1gi0OYVO0oe>2jVB2O0O100000000O010000O01VUOKie04XZOMmj011OO01O00TTj00URm00fiXN2O1nPO0d=0k1Ob@1a=2]a00N1O11O4L^c6Nd\\I2O1O01NQP4OmlK2VVOOii02VVONji03R37KNO0iROGhj06RUOJlM2Rm02QUO7Zk0GWTO`0cm00100O001O0KcPOH^o08411O00fPOGnn07QQOLon03PQONPo02QQOMPo01RQOMon02SQOMil5OhRK0O1N2O1000O1O2MYb61g]I20O1NaYd00_f[O4TQh1'}} {'file_name': '/n/tambe_lab/Users/michelewang/test/images/hurricane-matthew_00000118_post_disaster.png', 'category_id': 1, 'segmentation': {'size': [1024, 1024], 'counts': '04X15jN>9ZO>7n42_J3_26cNFTOe0KJNn1S2bMYNh0HZO5O0Ij1e1B]N^O5jN>9ZO>7n42_J3_26cNFTOe0KJNn1S2bMYNh00  #THERE IS MORE, JUST DID NOT DISPLAY HERE

I ran this command:

import pycocotools.mask as mask_util
m = mask_util.decode(data[0]['segmentation']['counts'])

and this was my full error trace; I'm just not sure how to resolve this since data[0]['segmentation']['counts'] seems to be the only way I can access the RLE format.

TypeError                                 Traceback (most recent call last)
<ipython-input-20-4f4ddf7b39fd> in <module>
      1 # theres gotta be a. way
----> 2 m = mask_util.decode(data[0]['segmentation']['counts'])

~/.conda/envs/active/lib/python3.8/site-packages/pycocotools/mask.py in decode(rleObjs)
     89         return _mask.decode(rleObjs)
     90     else:
---> 91         return _mask.decode([rleObjs])[:,:,0]
     92 
     93 def area(rleObjs):

pycocotools/_mask.pyx in pycocotools._mask.decode()

pycocotools/_mask.pyx in pycocotools._mask._frString()

TypeError: string indices must be integers

ppwwyyxx commented 3 years ago

The JSON file is not meant to be used directly. The best way to obtain predictions is to just run the model (like https://detectron2.readthedocs.io/en/latest/tutorials/models.html#use-a-model or https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5).

pycocotools should probably be used like `mask_util.decode(data[0]['segmentation'])`. But that's not related to detectron2.
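
A sketch of that decode call, assuming `data` is the list loaded from the `sem_seg_predictions.json` shown above:

import json
import pycocotools.mask as mask_util

with open("output/inference/sem_seg_predictions.json") as f:
    data = json.load(f)

# pass the whole RLE dict ({'size': ..., 'counts': ...}), not just the
# 'counts' string; passing the bare string is what triggers the TypeError above
m = mask_util.decode(data[0]["segmentation"])  # (H, W) uint8 binary mask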

michelewang commented 3 years ago

Hi @ppwwyyxx, thanks so much for your response! I saw that section of the Colab tutorial, but how would that work with DeepLab, since the DeepLab README here says to run an inference script for model evaluation?

Would I basically need to replace `COCOEvaluator` with `SemSegEvaluator` and replace `inference_on_dataset` with part of the code from `train_net.py` in DeepLab?
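
A sketch of what that substitution might look like, assuming `cfg` and the registered `xbddata_test` dataset from the training script above, and `predictor` a `DefaultPredictor` built from that `cfg`:

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import SemSegEvaluator, inference_on_dataset

evaluator = SemSegEvaluator("xbddata_test", distributed=False, output_dir="./output/inference")
val_loader = build_detection_test_loader(cfg, "xbddata_test")
print(inference_on_dataset(predictor.model, val_loader, evaluator))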

ppwwyyxx commented 3 years ago

https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5#scrollTo=U5LhISJqWXgM is the section that displays predictions; as I understand it, that's the goal of this issue.

michelewang commented 3 years ago

Sorry about that / all the questions, but I think my issue is less about displaying the predictions and more about getting the predictions in an accessible form (i.e. masks, as is done in the tutorial). The DeepLab README seems to say the only way to get predictions is by running the script; does the tutorial work for DeepLab too? I don't know whether `DefaultTrainer` or `DefaultPredictor` work for DeepLab.

BartvanMarrewijk commented 3 years ago

Normally the predictor should work with DeepLab; it is also in the documentation. I tried changing the Colab code to get it working with DeepLab, but I get an error using `v.draw_sem_seg()`. The output of semantic segmentation has shape (C, H, W), but in numpy float32 format, while `v.draw_sem_seg` expects integers corresponding to the predicted labels. @ppwwyyxx do you have any clue how to solve this? Maybe a softmax?

out = v.draw_sem_seg(outputs.to("cpu"))
  File "/home/ubuntu16/detectron2/detectron2/utils/visualizer.py", line 439, in draw_sem_seg
    text = self.metadata.stuff_classes[label]
TypeError: list indices must be integers or slices, not numpy.float32

from detectron2.engine import DefaultTrainer, DefaultPredictor
import os
from itertools import chain
import cv2
import tqdm
import random

from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_train_loader
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2.projects.deeplab import add_deeplab_config

dataset_dicts = DatasetCatalog.get('cityscapes_fine_sem_seg_train')
metadata = MetadataCatalog.get('cityscapes_fine_sem_seg_train')

cfg = get_cfg()
add_deeplab_config(cfg)
cfg.merge_from_file('configs/Cityscapes-SemanticSegmentation/deeplab_v3_plus_R_103_os16_mg124_poly_90k_bs16.yaml')
cfg.MODEL.WEIGHTS = 'model_final_a8a355_deeplabv3+.pkl'
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set a custom testing threshold
predictor = DefaultPredictor(cfg)

# dataset_dicts = get_balloon_dicts("balloon/val")
for d in random.sample(dataset_dicts, 3):
    im = cv2.imread(d["file_name"])
    outputs = predictor(im)["sem_seg"]
    v = Visualizer(im[:, :, ::-1], metadata=metadata, scale=0.5)
    out = v.draw_sem_seg(outputs.to("cpu"))  # this line raises the TypeError above
    cv2_imshow(out.get_image()[:, :, ::-1])

git-haddadz commented 3 years ago

> Normally the predictor should work with DeepLab; it is also in the documentation. I tried changing the Colab code to get it working with DeepLab, but I get an error using `v.draw_sem_seg()`. [...]

@studentWUR I have the same problem. Did you find a solution to obtain integer labels from the prediction, or any other way to visualize the predictions of DeepLab? Thanks!

BartvanMarrewijk commented 3 years ago

I had the following solution. It works for my custom dataset, in which the background corresponds to an output of '0'. For Cityscapes every class works except the background class. Of course, it is also possible to write a custom drawing function, since the output is now an integer.

import torch
import torch.nn.functional as F

values2, outputs2 = torch.max(F.softmax(outputs, dim=0), axis=0)
out = v.draw_sem_seg(outputs2.to("cpu"))
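
(Side note: softmax is monotonic, so it does not change which class is largest; the same integer label map can be obtained directly with `outputs2 = outputs.argmax(dim=0)`.)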

ananthu-aniraj commented 3 years ago

Hi everyone, this is the code that works for me (for DeepLab inference). It can be used for running DeepLab on a webcam:

import cv2
import torch
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.visualizer import ColorMode, Visualizer

# val_metadata_imow_nb (used below) is the metadata of my custom dataset,
# registered elsewhere
cap = cv2.VideoCapture(0)
cfg_file = "configs/custom_configs/deeplab_v3_plus_R_103_os16_mg124_poly_90k_bs16_imow_without_background.yaml"
cfg = get_cfg()
add_deeplab_config(cfg)
cfg.merge_from_file(cfg_file)
cfg.INPUT.CROP.ENABLED = False
cfg.MODEL.WEIGHTS = "output/model_v3_plus_without_bg_c1.pth"
cfg.freeze()
predictor = DefaultPredictor(cfg)
while cap.isOpened():
    ret, frame = cap.read()
    if ret:

        outputs = predictor(frame)

        v = Visualizer(frame[:,:,::-1],
                        metadata=val_metadata_imow_nb,
                        scale=1,
                        instance_mode=ColorMode.SEGMENTATION
                        )

        sem_seg = torch.max(outputs["sem_seg"],dim=0)[1]
        sem_seg = sem_seg.to("cpu")
        sem_seg = sem_seg.numpy()
        cv2.imshow("Image",frame)

        out = v.draw_sem_seg(sem_seg, area_threshold=None, alpha=0.8)

        cv2.imshow("Seg_out",out.get_image()[:, :, ::-1])
        # Display the resulting frame

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
cap.release()

cv2.destroyAllWindows()
hydrangea3000 commented 1 year ago

Hello @ppwwyyxx, could you please help with a similar issue:

cfg = get_cfg()
add_deeplab_config(cfg)

cfg_path = 'C:/Users/.../detectron2/projects/Panoptic-DeepLab/configs/Cityscapes-PanopticSegmentation/'

yaml =  "panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml"
cfg.merge_from_file(cfg_path + yaml)

And I get `KeyError: 'Non-existent config key: MODEL.SEM_SEG_HEAD.HEAD_CHANNELS'`.

The same logic for object detection on COCO (with configs from /detectron2/configs/) works without errors.
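
By the same logic as the `RES5_MULTI_GRID` error earlier in this thread, `MODEL.SEM_SEG_HEAD.HEAD_CHANNELS` is a project-specific key that only exists after the project's config adder has run. A minimal sketch, assuming the Panoptic-DeepLab project package is importable (e.g. when running from `detectron2/projects/Panoptic-DeepLab`, whose `train_net.py` imports it the same way):

from detectron2.config import get_cfg
from detectron2.projects.deeplab import add_deeplab_config
# assumption: the Panoptic-DeepLab project package is on the path and ships
# its own config adder alongside the DeepLab one used earlier in this thread
from panoptic_deeplab import add_panoptic_deeplab_config

cfg = get_cfg()
add_deeplab_config(cfg)
add_panoptic_deeplab_config(cfg)  # registers MODEL.SEM_SEG_HEAD.HEAD_CHANNELS etc.
cfg.merge_from_file(cfg_path + yaml)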