open-mmlab / mmsegmentation

OpenMMLab Semantic Segmentation Toolbox and Benchmark.
https://mmsegmentation.readthedocs.io/en/main/
Apache License 2.0

assert palette.shape[0] == len(self.CLASSES) #2023

Open · sunghyun-nam opened this issue 2 years ago

sunghyun-nam commented 2 years ago

Hello.

Thank you for sharing your repo.

When I run the demo file, I get an error at `assert palette.shape[0] == len(self.CLASSES)`.

It seems to be an error that occurs when the palette does not match the model's classes.

I tried the demo with deeplabv3 (using a pascal_context_59 checkpoint), and the palette value was 'voc':

class_names.py, line 271: `'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug'],`
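For context, get_palette() resolves the name through the dataset_aliases table in class_names.py, so 'voc' always yields the 21-color VOC palette regardless of which checkpoint is loaded. A rough sketch of the lookup (paraphrased from the idea, not the exact library source):

```python
# Paraphrased sketch of how mmseg's get_palette() resolves alias strings;
# the real implementation lives in mmseg/core/evaluation/class_names.py.
dataset_aliases = {
    'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug'],
    # ... other datasets ('ade', 'cocostuff', 'cityscapes', ...) ...
}

def resolve_palette_name(dataset: str) -> str:
    alias2name = {alias: name
                  for name, aliases in dataset_aliases.items()
                  for alias in aliases}
    return alias2name[dataset]  # e.g. 'pascal_voc' -> 'voc' (21 colors)
```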

MengzhangLI commented 2 years ago

Hi, the reason is what you have figured out: the palette value and self.CLASSES do not match. You can trace where these two parameters are defined in the demo file, i.e., where palette and self.CLASSES each get their values, then uncomment or set the correct palette value.
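Concretely, show_result() converts the palette to an array and asserts one RGB row per entry of self.CLASSES. A minimal sketch of the workaround, assuming the mmseg 0.x API the demo uses (`model`, `img`, and `result` come from init_segmentor()/inference_segmentor()):

```python
import numpy as np

# Option 1: pass palette=None so show_result() falls back to the palette
# stored with the model (model.PALETTE), or to random colors if unset.
draw_img = model.show_result(img, result, palette=None, show=False)

# Option 2: build a palette sized to the model's class list, so
# `palette.shape[0] == len(self.CLASSES)` cannot fail.
rng = np.random.RandomState(42)  # fixed seed keeps colors stable
palette = rng.randint(0, 255, size=(len(model.CLASSES), 3))
draw_img = model.show_result(img, result, palette=palette, show=False)
```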

Also, it may be a bug in the demo file; if so, we will fix it ASAP.

Best,

sunghyun-nam commented 2 years ago

Thank you for your reply.

When I use other models (with cocostuff or ADE datasets), it works well.

I don't know the cause of the error...

MengzhangLI commented 2 years ago

Could you share the exact command you ran, or a link to your demo file?

sunghyun-nam commented 2 years ago

I made webcam_demo.py by referring to mmdetection's demo/webcam_demo.py: https://github.com/open-mmlab/mmdetection/blob/master/demo/webcam_demo.py

Command line:

```
$ cd mmsegmentation
$ python demo/webcam_demo.py
```

I set the argument defaults directly in the code.

Nothing deeper has been modified (e.g. init_segmentor(), inference_segmentor() and show_result() are called as in the stock demo):

```python
from argparse import ArgumentParser

import cv2
import torch
import numpy as np
from mmseg.apis import inference_segmentor, init_segmentor
from mmseg.core.evaluation import get_palette

import threading  # unused in this snippet
import timeit  # unused in this snippet

deeplab_config = 'configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k.py'
deeplab_check = 'checkpoint/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k.pth'

# NOTE: this pascal_context_59 pairing triggers the assertion when
# --palette is left at a non-matching value such as 'voc' or 'cocostuff'.
deeplabv3_config = 'configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59.py'
deeplabv3_check = 'checkpoint/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59.pth'

pspnet_config = 'configs/pspnet/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k.py'
pspnet_check = 'checkpoint/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k.pth'

twins_config = 'configs/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py'
twins_check = 'checkpoint/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.pth'

upernet_config = 'configs/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k.py'
upernet_check = 'checkpoint/upernet_convnext_xlarge_fp16_640x640_160k_ade20k.pth'

kupernet_config = 'configs/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k.py'
kupernet_check = 'checkpoint/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k.pth'

# palette names resolved by get_palette()
coco = 'cocostuff'
ade = 'ade'
voc = 'voc'


def parse_args():
    parser = ArgumentParser(description='MMSegmentation webcam demo')
    parser.add_argument('--config', default=deeplab_config, help='test config file path')
    parser.add_argument('--checkpoint', default=deeplab_check, help='checkpoint file')
    parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
    # must match the dataset the chosen checkpoint was trained on
    parser.add_argument('--palette', default=coco, help='Color palette used for segmentation map')
    parser.add_argument('--camera-id', type=int, default=0, help='camera device id')
    parser.add_argument('--show', action='store_true', default=True, help='Whether to show draw result')
    parser.add_argument('--show-wait-time', default=1, type=int, help='Wait time after imshow')
    parser.add_argument('--output-file', default=None, type=str, help='Output video file path')
    parser.add_argument('--output-fourcc', default='MJPG', type=str, help='Fourcc of the output video')
    parser.add_argument('--output-fps', default=60, type=int, help='FPS of the output video')
    parser.add_argument('--output-height', default=-1, type=int, help='Frame height of the output video')
    parser.add_argument('--output-width', default=-1, type=int, help='Frame width of the output video')
    parser.add_argument('--opacity', type=float, default=0.5, help='Opacity of painted segmentation map. In (0, 1] range.')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()

    device = torch.device(args.device)
    model = init_segmentor(args.config, args.checkpoint, device=device)

    cap = cv2.VideoCapture(args.camera_id)
    assert cap.isOpened()
    input_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    input_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    input_fps = cap.get(cv2.CAP_PROP_FPS)

    # init output video
    writer = None
    output_height = None
    output_width = None
    if args.output_file is not None:
        fourcc = cv2.VideoWriter_fourcc(*args.output_fourcc)
        output_fps = args.output_fps if args.output_fps > 0 else input_fps
        output_height = args.output_height if args.output_height > 0 else int(input_height)
        output_width = args.output_width if args.output_width > 0 else int(input_width)
        writer = cv2.VideoWriter(args.output_file, fourcc, output_fps,
                                 (output_width, output_height), True)

    try:
        while True:
            flag, frame = cap.read()
            if not flag:
                break
            # test a single image
            result = inference_segmentor(model, frame)

            # blend raw image and prediction; this is where the
            # `palette.shape[0] == len(self.CLASSES)` assertion fires
            # if --palette does not match the checkpoint's dataset
            draw_img = model.show_result(
                frame,
                result,
                palette=get_palette(args.palette),
                show=False,
                opacity=args.opacity)

            cv2.imshow('video_demo', draw_img)
            cv2.waitKey(args.show_wait_time)

            if writer:
                if draw_img.shape[0] != output_height or draw_img.shape[1] != output_width:
                    draw_img = cv2.resize(draw_img, (output_width, output_height))
                writer.write(draw_img)
    finally:
        if writer:
            writer.release()
        cap.release()


if __name__ == '__main__':
    main()
```
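One way to catch the mismatch up front (a sketch, assuming the same mmseg 0.x API as the script above; not part of the original demo) is to validate the palette right after init_segmentor(), before entering the capture loop:

```python
# Sketch: fail fast with a readable message instead of the bare assertion
# inside show_result(). `args` and `model` are defined as in main() above.
palette = get_palette(args.palette)
assert len(palette) == len(model.CLASSES), (
    f'--palette "{args.palette}" has {len(palette)} colors but the model '
    f'predicts {len(model.CLASSES)} classes; pass a palette that matches '
    f'the dataset the checkpoint was trained on')
```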

jiemodad commented 1 year ago

Hello, I am having a similar issue. It works fine with the cityscapes dataset, but I hit the same bug with the ADE dataset. Have you solved it?

jiemodad commented 1 year ago

Thanks, I fixed this issue. I had forgotten to set the --palette parameter.
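For anyone else who hits this: with an ADE20K checkpoint, the demo call looks roughly like `python demo/image_demo.py <image> <ade20k-config>.py <checkpoint>.pth --palette ade` (the paths here are placeholders).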

laobao666 commented 1 year ago

I used the demo with deeplabv3plus (loveda) and hit the same issue. How can I solve this problem?