mdhmz1 / Auto-Annotate

Auto-Annotate - Automatically annotate your entire image directory with a single command. It is as simple as saying "Annotate all the street signs (label) in the autonomous car dataset (directory)", and it is done: every image in the diverse directory that contains a street sign is filtered, and segmentation annotation is performed on it, all in one command. The Auto-Annotate tool provides automatic segmentation-mask annotation for the objects in the images of a directory, based on the given labels. It can generate annotations for the labels defined in the COCO Dataset and also supports Custom Labels.
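For orientation, a minimal sketch of what such an invocation might look like. The script name and flags below are assumptions, not the confirmed CLI; see the repository README for the exact interface.

```sh
# Hypothetical invocation (script and flag names are assumptions, not the
# confirmed CLI; check the Auto-Annotate README for the real usage):
# annotate every image in the directory that contains a street sign,
# starting from COCO-pretrained Mask R-CNN weights.
python3 annotate.py annotateCoco --image_directory=/path/to/dataset/ --label="street sign" --weights=/path/to/mask_rcnn_coco.h5
```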
MIT License

TypeError: list indices must be integers or slices, not str (issue not resolving) #14

Closed: nnuk closed this issue 2 years ago

nnuk commented 2 years ago

I have tried to custom-train my own dataset, which has its own COCO JSON format annotation file.

When I try to run `python3 customTrain.py train --dataset=path/to/dir --weights=coco`, I get the following error:

```
Traceback (most recent call last):
  File "customTrain.py", line 279, in <module>
    train(model)
  File "customTrain.py", line 179, in train
    dataset_train.load_custom(args.dataset, "train")
  File "customTrain.py", line 87, in load_custom
    annotations = [a for a in annotations if a['regions']]
  File "customTrain.py", line 87, in <listcomp>
    annotations = [a for a in annotations if a['regions']]
TypeError: list indices must be integers or slices, not str
```

My customTrain.py looks like the following:

```python
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw

# Root directory of the project
ROOT_DIR = "/home/hiwi/Auto-Annotate"

# Import Mask RCNN
sys.path.append(ROOT_DIR)  # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils

# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")

# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")

############################################################
#  Configurations
############################################################

class CustomConfig(Config):
    """Configuration for training on the toy dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "custom"

    IMAGES_PER_GPU = 1

    # Number of classes (including background)
    NUM_CLASSES = 1 + 2  # Background + 2 classes

    # Number of training steps per epoch
    STEPS_PER_EPOCH = 100

    # Skip detections with < 90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9

############################################################
#  Dataset
############################################################

class CustomDataset(utils.Dataset):

    def load_custom(self, dataset_dir, subset):
        """Load a subset of the Custom dataset.
        dataset_dir: Root directory of the dataset.
        subset: Subset to load: train or val
        """
        # Add classes. We have two classes to add.
        self.add_class("custom", 0, "Primary_Track")
        self.add_class("custom", 1, "Secondary_Track")

        # Train or validation dataset?
        assert subset in ["train", "val"]
        dataset_dir = os.path.join(dataset_dir, subset)

        # Load annotations
        # VGG Image Annotator (up to version 1.6) saves each image in the form:
        # { 'filename': '28503151_5b5b7ec140_b.jpg',
        #   'regions': {
        #       '0': {
        #           'region_attributes': {},
        #           'shape_attributes': {
        #               'all_points_x': [...],
        #               'all_points_y': [...],
        #               'name': 'polygon'}},
        #       ... more regions ...
        #   },
        #   'size': 100202
        # }
        # We mostly care about the x and y coordinates of each region
        # Note: In VIA 2.0, regions was changed from a dict to a list.
        annotations1 = json.load(open(os.path.join(dataset_dir, "train.json")))
        annotations = list(annotations1.values())  # don't need the dict keys

        # The VIA tool saves images in the JSON even if they don't have any
        # annotations. Skip unannotated images.
        annotations = [a for a in annotations if a['regions']]

        # Add images
        for a in annotations:
            # Get the x, y coordinates of points of the polygons that make up
            # the outline of each object instance. These are stored in the
            # shape_attributes (see json format above)
            # The if condition is needed to support VIA versions 1.x and 2.x.
            if type(a['regions']) is dict:
                polygons = [r['shape_attributes'] for r in a['regions'].values()]
            else:
                polygons = [r['shape_attributes'] for r in a['regions']]

            # Labelling each class in the given image with a number
            custom = [s['region_attributes'] for s in a['regions']]

            num_ids = []
            # Add the classes according to the requirement
            for n in custom:
                try:
                    if n['name'] == "Primary_Track":
                        num_ids.append(0)
                    elif n['name'] == 'Secondary_Track':
                        num_ids.append(1)
                except:
                    pass

            # load_mask() needs the image size to convert polygons to masks.
            # Unfortunately, VIA doesn't include it in JSON, so we must read
            # the image. This is only manageable since the dataset is tiny.
            image_path = os.path.join(dataset_dir, a['filename'])
            image = skimage.io.imread(image_path)
            height, width = image.shape[:2]

            self.add_image(
                "custom",
                image_id=a['filename'],  # use file name as a unique image id
                path=image_path,
                width=width, height=height,
                polygons=polygons,
                num_ids=num_ids)

    def load_mask(self, image_id):
        """Generate instance masks for an image.
        Returns:
         masks: A bool array of shape [height, width, instance count] with
            one mask per instance.
         class_ids: a 1D array of class IDs of the instance masks.
        """
        # If not a custom dataset image, delegate to parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "custom":
            return super(self.__class__, self).load_mask(image_id)
        num_ids = image_info['num_ids']
        # print("Here is the numID", num_ids)

        # Convert polygons to a bitmap mask of shape
        # [height, width, instance_count]
        info = self.image_info[image_id]
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=np.uint8)
        for i, p in enumerate(info["polygons"]):
            if p['name'] == 'polygon':
                # Get indexes of pixels inside the polygon and set them to 1
                rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
            else:
                rr, cc = skimage.draw.rectangle((p['y'], p['x']),
                                                extent=(p['height'], p['width']))

            rr[rr > mask.shape[0] - 1] = mask.shape[0] - 1
            cc[cc > mask.shape[1] - 1] = mask.shape[1] - 1
            mask[rr, cc, i] = 1

        # Return mask, and array of class IDs of each instance.
        num_ids = np.array(num_ids, dtype=np.int32)
        return mask.astype(np.bool), num_ids.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)
        # return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)

    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        if info["source"] == "Railtrack":
            return info["path"]
        else:
            super(self.__class__, self).image_reference(image_id)

def train(model):
    """Train the model."""
    # Training dataset.
    dataset_train = CustomDataset()
    dataset_train.load_custom(args.dataset, "train")
    dataset_train.prepare()

    # Validation dataset
    dataset_val = CustomDataset()
    dataset_val.load_custom(args.dataset, "val")
    dataset_val.prepare()

    # This training schedule is an example. Update to your needs.
    # Since we're using a very small dataset, and starting from
    # COCO trained weights, we don't need to train too long. Also,
    # no need to train all layers, just the heads should do it.
    print("Training network heads")
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=30,
                layers='heads')

############################################################
#  Training
############################################################

if __name__ == '__main__':
    import argparse

    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect custom objects.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'splash'")
    parser.add_argument('--dataset', required=False,
                        metavar="/path/to/custom/dataset/",
                        help='Directory of the Custom dataset')
    parser.add_argument('--weights', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--image', required=False,
                        metavar="path or URL to image",
                        help='Image to apply the color splash effect on')
    parser.add_argument('--video', required=False,
                        metavar="path or URL to video",
                        help='Video to apply the color splash effect on')
    args = parser.parse_args()

    # Validate arguments
    if args.command == "train":
        assert args.dataset, "Argument --dataset is required for training"
    elif args.command == "splash":
        assert args.image or args.video, \
            "Provide --image or --video to apply color splash"

    print("Weights: ", args.weights)
    print("Dataset: ", args.dataset)
    print("Logs: ", args.logs)

    # Configurations
    if args.command == "train":
        config = CustomConfig()

    # Create model
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.logs)

    # Select weights file to load
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
        # Download weights file
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == "last":
        # Find last trained weights
        weights_path = model.find_last()
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights

    # Load weights
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])
    else:
        model.load_weights(weights_path, by_name=True)

    # Train or evaluate
    if args.command == "train":
        train(model)
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'splash'".format(args.command))
```

Kindly help.

mdhmz1 commented 2 years ago

There is a problem with the `train.json` that you created for your custom dataset after annotating the custom images. Your script expects VIA-style JSON, not the COCO format you have, at the point where it loads the file:

`annotations1 = json.load(open(os.path.join(dataset_dir, "train.json")))`

The sketch below shows why that mismatch raises exactly this TypeError.
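A minimal, self-contained sketch of the mismatch (filenames and values are illustrative): customTrain.py expects a VIA-style dict keyed by image, where each value carries a `regions` field, while a COCO export has top-level `images`/`annotations`/`categories` lists, so `a['regions']` ends up indexing a list with a string.

```python
# VIA-style JSON (what customTrain.py expects): a dict keyed by image,
# each value a dict that has a 'regions' field.
via_style = {
    "img1.jpg123456": {
        "filename": "img1.jpg",
        "size": 123456,
        "regions": [{"shape_attributes": {"name": "polygon",
                                          "all_points_x": [0, 10, 10],
                                          "all_points_y": [0, 0, 10]},
                     "region_attributes": {"name": "Primary_Track"}}],
    },
}
annotations = list(via_style.values())
print([a for a in annotations if a['regions']])  # works: each 'a' is a dict

# COCO-style JSON (what your file contains): top-level lists, not
# per-image dicts.
coco_style = {
    "images": [{"id": 1, "file_name": "img1.jpg"}],
    "annotations": [{"id": 1, "image_id": 1, "category_id": 1,
                     "segmentation": [[0, 0, 10, 0, 10, 10]]}],
    "categories": [{"id": 1, "name": "Primary_Track"}],
}
annotations = list(coco_style.values())  # a list of *lists*
print([a for a in annotations if a['regions']])
# -> TypeError: list indices must be integers or slices, not str
```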

Please refer to this section in order to generate the JSON file correctly: Auto-Annotate#training-on-your-own-dataset
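Alternatively, if you would rather keep the COCO-format file than regenerate a VIA-style one, here is a hedged sketch of reading it with pycocotools. This is an assumption on my part, not what the repo's workflow prescribes, and the path is illustrative.

```python
from pycocotools.coco import COCO

# Illustrative path; point this at your COCO-format annotation file.
coco = COCO("path/to/train.json")

# Walk every image and its instance annotations.
for img in coco.loadImgs(coco.getImgIds()):
    for ann in coco.loadAnns(coco.getAnnIds(imgIds=img["id"])):
        print(img["file_name"], ann["category_id"], ann["segmentation"])
```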