Megvii-BaseDetection / YOLOX

YOLOX is a high-performance anchor-free YOLO, exceeding yolov3~v5 with MegEngine, ONNX, TensorRT, ncnn, and OpenVINO supported. Documentation: https://yolox.readthedocs.io/
Apache License 2.0

assert img is not None #697


nateeinit commented 3 years ago

File "tools/train.py", line 132, in args=(exp, args), │ └ Namespace(batch_size=16, cache=False, ckpt='weights/yolox_nano.pth', devices=1, dist_backend='nccl', dist_url=None, exp_file=... └ ╒══════════════════╤═════════════════════════════════════════════════════════════════════════════════════════════════════════...

File "/root/YOLOX/yolox/core/launch.py", line 98, in launch main_func(*args) │ └ (╒══════════════════╤════════════════════════════════════════════════════════════════════════════════════════════════════════... └ <function main at 0x7fd6415a99d8>

File "tools/train.py", line 110, in main trainer.train() │ └ <function Trainer.train at 0x7fd64181a158> └ <yolox.core.trainer.Trainer object at 0x7fd6415b3da0>

File "/root/YOLOX/yolox/core/trainer.py", line 70, in train self.before_train() │ └ <function Trainer.before_train at 0x7fd64158cf28> └ <yolox.core.trainer.Trainer object at 0x7fd6415b3da0>

File "/root/YOLOX/yolox/core/trainer.py", line 152, in before_train self.prefetcher = DataPrefetcher(self.train_loader) │ │ │ └ <yolox.data.dataloading.DataLoader object at 0x7fd63321de10> │ │ └ <yolox.core.trainer.Trainer object at 0x7fd6415b3da0> │ └ <class 'yolox.data.data_prefetcher.DataPrefetcher'> └ <yolox.core.trainer.Trainer object at 0x7fd6415b3da0>

File "/root/YOLOX/yolox/data/data_prefetcher.py", line 21, in init self.preload() │ └ <function DataPrefetcher.preload at 0x7fd641880620> └ <yolox.data.data_prefetcher.DataPrefetcher object at 0x7fd64f9ebcf8>

File "/root/YOLOX/yolox/data/data_prefetcher.py", line 25, in preload self.next_input, self.nexttarget, , _ = next(self.loader) │ │ │ └ <torch.utils.data.dataloader._MultiProcessingDataLoaderIter object at 0x7fd64f973198> │ │ └ <yolox.data.data_prefetcher.DataPrefetcher object at 0x7fd64f9ebcf8> │ └ <yolox.data.data_prefetcher.DataPrefetcher object at 0x7fd64f9ebcf8> └ <yolox.data.data_prefetcher.DataPrefetcher object at 0x7fd64f9ebcf8>

File "/root/anaconda3/envs/py36/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 521, in next data = self._next_data() │ └ <function _MultiProcessingDataLoaderIter._next_data at 0x7fd64f9af730> └ <torch.utils.data.dataloader._MultiProcessingDataLoaderIter object at 0x7fd64f973198> File "/root/anaconda3/envs/py36/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 1203, in _next_data return self._process_data(data) │ │ └ <torch._utils.ExceptionWrapper object at 0x7fd632f4e748> │ └ <function _MultiProcessingDataLoaderIter._process_data at 0x7fd64f9af840> └ <torch.utils.data.dataloader._MultiProcessingDataLoaderIter object at 0x7fd64f973198> File "/root/anaconda3/envs/py36/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 1229, in _process_data data.reraise() │ └ <function ExceptionWrapper.reraise at 0x7fd6e974c7b8> └ <torch._utils.ExceptionWrapper object at 0x7fd632f4e748> File "/root/anaconda3/envs/py36/lib/python3.6/site-packages/torch/_utils.py", line 425, in reraise raise self.exc_type(msg) │ │ └ 'Caught AssertionError in DataLoader worker process 0.\nOriginal Traceback (most recent call last):\n File "/root/anaconda3/... │ └ <class 'AssertionError'> └ <torch._utils.ExceptionWrapper object at 0x7fd632f4e748>

AssertionError: Caught AssertionError in DataLoader worker process 0. Original Traceback (most recent call last): File "/root/anaconda3/envs/py36/lib/python3.6/site-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop data = fetcher.fetch(index) File "/root/anaconda3/envs/py36/lib/python3.6/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch data = [self.dataset[idx] for idx in possibly_batched_index] File "/root/anaconda3/envs/py36/lib/python3.6/site-packages/torch/utils/data/_utils/fetch.py", line 44, in data = [self.dataset[idx] for idx in possibly_batched_index] File "/root/YOLOX/yolox/data/datasets/datasets_wrapper.py", line 110, in wrapper ret_val = getitem_fn(self, index) File "/root/YOLOX/yolox/data/datasets/mosaicdetection.py", line 95, in getitem img, labels, , _ = self._dataset.pull_item(index) File "/root/YOLOX/yolox/data/datasets/voc.py", line 240, in pull_item img = self.load_resized_img(index) File "/root/YOLOX/yolox/data/datasets/voc.py", line 207, in load_resized_img img = self.load_image(index) File "/root/YOLOX/yolox/data/datasets/voc.py", line 220, in load_image assert img is not None AssertionError

My voc.py:

#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Code is based on
# https://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py
# Copyright (c) Francisco Massa.
# Copyright (c) Ellis Brown, Max deGroot.
# Copyright (c) Megvii, Inc. and its affiliates.

import os
import os.path
import pickle
import xml.etree.ElementTree as ET

from loguru import logger

import cv2
import numpy as np

from yolox.evaluators.voc_eval import voc_eval

from .datasets_wrapper import Dataset
from .voc_classes import VOC_CLASSES

class AnnotationTransform(object):

"""Transforms a VOC annotation into a Tensor of bbox coords and label index
Initialized with a dictionary lookup of classnames to indexes

Arguments:
    class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
        (default: alphabetic indexing of VOC's 20 classes)
    keep_difficult (bool, optional): keep difficult instances or not
        (default: False)
    height (int): height
    width (int): width
"""

def __init__(self, class_to_ind=None, keep_difficult=True):
    self.class_to_ind = class_to_ind or dict(
        zip(VOC_CLASSES, range(len(VOC_CLASSES)))
    )
    self.keep_difficult = keep_difficult

def __call__(self, target):
    """
    Arguments:
        target (annotation) : the target annotation to be made usable
            will be an ET.Element
    Returns:
        a list containing lists of bounding boxes  [bbox coords, class name]
    """
    res = np.empty((0, 5))
    for obj in target.iter("object"):
        difficult = obj.find("difficult")
        if difficult is not None:
            difficult = int(difficult.text) == 1
        else:
            difficult = False
        if not self.keep_difficult and difficult:
            continue
        name = obj.find("name").text.strip()
        bbox = obj.find("bndbox")

        pts = ["xmin", "ymin", "xmax", "ymax"]
        bndbox = []
        for i, pt in enumerate(pts):
            cur_pt = int(bbox.find(pt).text) - 1
            # scale height or width
            # cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
            bndbox.append(cur_pt)
        label_idx = self.class_to_ind[name]
        bndbox.append(label_idx)
        res = np.vstack((res, bndbox))  # [xmin, ymin, xmax, ymax, label_ind]
        # img_id = target.find('filename').text[:-4]

    width = int(target.find("size").find("width").text)
    height = int(target.find("size").find("height").text)
    img_info = (height, width)

    return res, img_info

class VOCDetection(Dataset):

"""
VOC Detection Dataset Object

input is image, target is annotation

Args:
    root (string): filepath to VOCdevkit folder.
    image_set (string): imageset to use (eg. 'train', 'val', 'test')
    transform (callable, optional): transformation to perform on the
        input image
    target_transform (callable, optional): transformation to perform on the
        target `annotation`
        (eg: take in caption string, return tensor of word indices)
    dataset_name (string, optional): which dataset to load
        (default: 'VOC2007')
"""

def __init__(
    self,
    data_dir,
    image_sets=[("2007", "trainval"), ("2012", "trainval")],
    img_size=(416, 416),
    preproc=None,
    target_transform=AnnotationTransform(),
    dataset_name="VOC0712",
    cache=False,
):
    super().__init__(img_size)
    self.root = data_dir
    self.image_set = image_sets
    self.img_size = img_size
    self.preproc = preproc
    self.target_transform = target_transform
    self.name = dataset_name
    self._annopath = os.path.join("%s", "Annotations", "%s.xml")
    self._imgpath = os.path.join("%s", "JPEGImages", "%s.png")
    self._classes = VOC_CLASSES
    self.ids = list()
    for (year, name) in image_sets:
        self._year = year
        rootpath = os.path.join(self.root, "VOC" + year)
        for line in open(
            os.path.join(rootpath, "ImageSets", "Main", name + ".txt")
        ):
            self.ids.append((rootpath, line.strip()))

    self.annotations = self._load_coco_annotations()
    self.imgs = None
    if cache:
        self._cache_images()

def __len__(self):
    return len(self.ids)

def _load_coco_annotations(self):
    return [self.load_anno_from_ids(_ids) for _ids in range(len(self.ids))]

def _cache_images(self):
    logger.warning(
        "\n********************************************************************************\n"
        "You are using cached images in RAM to accelerate training.\n"
        "This requires large system RAM.\n"
        "Make sure you have 60G+ RAM and 19G available disk space for training VOC.\n"
        "********************************************************************************\n"
    )
    max_h = self.img_size[0]
    max_w = self.img_size[1]
    cache_file = self.root + "/img_resized_cache_" + self.name + ".array"
    if not os.path.exists(cache_file):
        logger.info(
            "Caching images for the frist time. This might take about 3 minutes for VOC"
        )
        self.imgs = np.memmap(
            cache_file,
            shape=(len(self.ids), max_h, max_w, 3),
            dtype=np.uint8,
            mode="w+",
        )
        from tqdm import tqdm
        from multiprocessing.pool import ThreadPool

        NUM_THREADs = min(8, os.cpu_count())
        loaded_images = ThreadPool(NUM_THREADs).imap(
            lambda x: self.load_resized_img(x),
            range(len(self.annotations)),
        )
        pbar = tqdm(enumerate(loaded_images), total=len(self.annotations))
        for k, out in pbar:
            self.imgs[k][: out.shape[0], : out.shape[1], :] = out.copy()
        self.imgs.flush()
        pbar.close()
    else:
        logger.warning(
            "You are using cached imgs! Make sure your dataset is not changed!!"
        )

    logger.info("Loading cached imgs...")
    self.imgs = np.memmap(
        cache_file,
        shape=(len(self.ids), max_h, max_w, 3),
        dtype=np.uint8,
        mode="r+",
    )

def load_anno_from_ids(self, index):
    img_id = self.ids[index]
    target = ET.parse(self._annopath % img_id).getroot()

    assert self.target_transform is not None
    res, img_info = self.target_transform(target)
    height, width = img_info

    r = min(self.img_size[0] / height, self.img_size[1] / width)
    res[:, :4] *= r
    resized_info = (int(height * r), int(width * r))

    return (res, img_info, resized_info)

def load_anno(self, index):
    return self.annotations[index][0]

def load_resized_img(self, index):
    img = self.load_image(index)
    r = min(self.img_size[0] / img.shape[0], self.img_size[1] / img.shape[1])
    resized_img = cv2.resize(
        img,
        (int(img.shape[1] * r), int(img.shape[0] * r)),
        interpolation=cv2.INTER_LINEAR,
    ).astype(np.uint8)

    return resized_img

def load_image(self, index):
    img_id = self.ids[index]
    img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
    assert img is not None

    return img

def pull_item(self, index):
    """Returns the original image and target at an index for mixup

    Note: not using self.__getitem__(), as any transformations passed in
    could mess up this functionality.

    Argument:
        index (int): index of img to show
    Return:
        img, target
    """
    if self.imgs is not None:
        target, img_info, resized_info = self.annotations[index]
        pad_img = self.imgs[index]
        img = pad_img[: resized_info[0], : resized_info[1], :].copy()
    else:
        img = self.load_resized_img(index)
        target, img_info, _ = self.annotations[index]

    return img, target, img_info, index

@Dataset.mosaic_getitem
def __getitem__(self, index):
    img, target, img_info, img_id = self.pull_item(index)

    if self.preproc is not None:
        img, target = self.preproc(img, target, self.input_dim)

    return img, target, img_info, img_id

def evaluate_detections(self, all_boxes, output_dir=None):
    """
    all_boxes is a list of length number-of-classes.
    Each list element is a list of length number-of-images.
    Each of those list elements is either an empty list []
    or a numpy array of detection.

    all_boxes[class][image] = [] or np.array of shape #dets x 5
    """
    self._write_voc_results_file(all_boxes)
    IouTh = np.linspace(
        0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True
    )
    mAPs = []
    for iou in IouTh:
        mAP = self._do_python_eval(output_dir, iou)
        mAPs.append(mAP)

    print("--------------------------------------------------------------")
    print("map_5095:", np.mean(mAPs))
    print("map_50:", mAPs[0])
    print("--------------------------------------------------------------")
    return np.mean(mAPs), mAPs[0]

def _get_voc_results_file_template(self):
    filename = "comp4_det_test" + "_{:s}.txt"
    filedir = os.path.join(self.root, "results", "VOC" + self._year, "Main")
    if not os.path.exists(filedir):
        os.makedirs(filedir)
    path = os.path.join(filedir, filename)
    return path

def _write_voc_results_file(self, all_boxes):
    for cls_ind, cls in enumerate(VOC_CLASSES):
        cls_ind = cls_ind
        if cls == "__background__":
            continue
        print("Writing {} VOC results file".format(cls))
        filename = self._get_voc_results_file_template().format(cls)
        with open(filename, "wt") as f:
            for im_ind, index in enumerate(self.ids):
                index = index[1]
                dets = all_boxes[cls_ind][im_ind]
                if dets == []:
                    continue
                for k in range(dets.shape[0]):
                    f.write(
                        "{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n".format(
                            index,
                            dets[k, -1],
                            dets[k, 0] + 1,
                            dets[k, 1] + 1,
                            dets[k, 2] + 1,
                            dets[k, 3] + 1,
                        )
                    )

def _do_python_eval(self, output_dir="output", iou=0.5):
    rootpath = os.path.join(self.root, "VOC" + self._year)
    name = self.image_set[0][1]
    annopath = os.path.join(rootpath, "Annotations", "{:s}.xml")
    imagesetfile = os.path.join(rootpath, "ImageSets", "Main", name + ".txt")
    cachedir = os.path.join(
        self.root, "annotations_cache", "VOC" + self._year, name
    )
    if not os.path.exists(cachedir):
        os.makedirs(cachedir)
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True if int(self._year) < 2010 else False
    print("Eval IoU : {:.2f}".format(iou))
    if output_dir is not None and not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for i, cls in enumerate(VOC_CLASSES):

        if cls == "__background__":
            continue

        filename = self._get_voc_results_file_template().format(cls)
        rec, prec, ap = voc_eval(
            filename,
            annopath,
            imagesetfile,
            cls,
            cachedir,
            ovthresh=iou,
            use_07_metric=use_07_metric,
        )
        aps += [ap]
        if iou == 0.5:
            print("AP for {} = {:.4f}".format(cls, ap))
        if output_dir is not None:
            with open(os.path.join(output_dir, cls + "_pr.pkl"), "wb") as f:
                pickle.dump({"rec": rec, "prec": prec, "ap": ap}, f)
    if iou == 0.5:
        print("Mean AP = {:.4f}".format(np.mean(aps)))
        print("~~~~~~~~")
        print("Results:")
        for ap in aps:
            print("{:.3f}".format(ap))
        print("{:.3f}".format(np.mean(aps)))
        print("~~~~~~~~")
        print("")
        print("--------------------------------------------------------------")
        print("Results computed with the **unofficial** Python eval code.")
        print("Results should be very close to the official MATLAB eval code.")
        print("Recompute with `./tools/reval.py --matlab ...` for your paper.")
        print("-- Thanks, The Management")
        print("--------------------------------------------------------------")

    return np.mean(aps)
FateScript commented 3 years ago

assert img is not None means your dataloader did not get your input data; please check your paths carefully.
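
To narrow down which file fails before launching training, a minimal standalone check can walk the same ids list that the VOCDetection class above builds and report every image path missing from disk. This is a sketch, not part of YOLOX; `data_dir` and the image set are placeholders for your own setup:

```python
import os

from yolox.data.datasets.voc import VOCDetection

# Placeholder setup: point data_dir at your VOCdevkit folder and use the
# image set you actually train on.
dataset = VOCDetection(data_dir="/path/to/VOCdevkit", image_sets=[("2007", "trainval")])

for rootpath, img_id in dataset.ids:
    path = dataset._imgpath % (rootpath, img_id)
    if not os.path.exists(path):
        # cv2.imread returns None for entries like this, tripping the assert
        print("missing:", path)
```

Any path printed here is one that load_image would fail on.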

milena-andreuzo commented 3 years ago

Had this problem and it was solved by changing the path to the folder where my images actually are. In my case I made the change in coco.py inside YOLOX/yolox/data/datasets/; for a VOC dataset I guess you have to edit the path in voc.py.
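
In the voc.py posted above, the places to check are the two path templates set in `__init__`. That code builds image paths with a `.png` extension, while a standard VOC layout stores `.jpg` files under JPEGImages; in that case cv2.imread returns None for every id. A minimal sketch of the edit, assuming your images really are JPEGs:

```python
# Inside VOCDetection.__init__: make both templates match where your
# annotations and images actually live, including the file extension.
self._annopath = os.path.join("%s", "Annotations", "%s.xml")
self._imgpath = os.path.join("%s", "JPEGImages", "%s.jpg")  # posted code had "%s.png"
```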

DLsnowman commented 3 years ago

I am training on COCO2017 and met the same problem.

hudaodao69 commented 3 years ago

> Had this problem and it was solved by changing the path to the folder where my images actually are. In my case I made the change in coco.py inside YOLOX/yolox/data/datasets/; for a VOC dataset I guess you have to edit the path in voc.py.

I met the same problem on my custom dataset in COCO format. Could you explain your solution in more detail? Thanks!

chenhc88 commented 2 years ago

I had the same problem on the VOC dataset. Could you explain what to do?

improgress commented 2 years ago

Check the format of your data: are your images png or jpg?
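
One quick way to check is to count the extensions actually on disk and compare them with the template in voc.py. A small sketch (the directory is a placeholder for your own image folder):

```python
import os
from collections import Counter

img_dir = "/path/to/VOCdevkit/VOC2007/JPEGImages"  # placeholder path
exts = Counter(os.path.splitext(name)[1].lower() for name in os.listdir(img_dir))
print(exts)  # e.g. Counter({'.jpg': 5011}): then use "%s.jpg" in self._imgpath
```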

yanzhuangzhuang123 commented 2 years ago

I had the same problem on the COCO dataset. How was this solved?