GiovanniPasq / DA-Faster-RCNN

Detectron2 implementation of DA-Faster R-CNN, Domain Adaptive Faster R-CNN for Object Detection in the Wild

TypeError: forward() takes 2 positional arguments but 4 were given #3

Closed · HarryKang13 closed 1 year ago

HarryKang13 commented 1 year ago

Thanks for your work! When I train the model on my own dataset, I get the error 'TypeError: forward() takes 2 positional arguments but 4 were given' in 'torch/nn/modules/module.py'. I only changed the dataset root and did not change any other arguments. I am using a single GPU. Could you help me?

HarryKang13 commented 1 year ago

And my PyTorch version is 1.8.0.

GiovanniPasq commented 1 year ago

Hello, can you give me more details? Did you install detectron2 version 0.6? Did you replace the classes rcnn.py and roi_heads.py with mine?
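As a quick check, you can run the following in the same Python environment you train in (a rough sketch; it assumes the replaced rcnn.py still defines the GeneralizedRCNN class):

import inspect
import detectron2
from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN

# Installed detectron2 version (should be 0.6)
print(detectron2.__version__)
# With the replaced rcnn.py, forward should list extra parameters
# beyond the stock (self, batched_inputs)
print(inspect.signature(GeneralizedRCNN.forward))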

HarryKang13 commented 1 year ago

Yes, I installed detectron2 version 0.6 and replaced rcnn.py and roi_heads.py. My CUDA version is 11.3. This is my uda_train.py, and I did not change any other files:

from detectron2.utils.logger import setup_logger
setup_logger()
import numpy as np
from detectron2 import model_zoo
from detectron2.config import get_cfg
import logging
import os
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
from detectron2.data import build_detection_test_loader, build_detection_train_loader
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import EventStorage
from detectron2.engine import default_writers
import torch
from detectron2.data.datasets import register_coco_instances, register_pascal_voc

#FOR PASCAL VOC ANNOTATIONS
# register_pascal_voc("city_trainS", "drive/My Drive/cityscape/", "train_s", 2007, ['car','person','rider','truck','bus','train','motorcycle','bicycle'])
# register_pascal_voc("city_trainT", "drive/My Drive/cityscape/", "train_t", 2007, ['car','person','rider','truck','bus','train','motorcycle','bicycle'])

# register_pascal_voc("city_testT", "drive/My Drive/cityscape/", "test_t", 2007, ['car','person','rider','truck','bus','train','motorcycle','bicycle'])

#FOR COCO ANNOTATIONS   
register_coco_instances("Chicago_train", {}, "/home/ubt-346/devdata1/kjn/Datasets/Chicago2Austin/chicago_train.json", "/home/ubt-346/devdata1/kjn/Datasets/Chicago2Austin/chicago")
register_coco_instances("Austin_train", {}, "/home/ubt-346/devdata1/kjn/Datasets/Chicago2Austin/austin_train.json", "/home/ubt-346/devdata1/kjn/Datasets/Chicago2Austin/austin")

register_coco_instances("Austin_test", {}, "/home/ubt-346/devdata1/kjn/Datasets/Chicago2Austin/austin_test.json", "/home/ubt-346/devdata1/kjn/Datasets/Chicago2Austin/austin_test")

logger = logging.getLogger("detectron2")

def do_train(cfg_source, cfg_target, model, resume = False):

    model.train()
    optimizer = build_optimizer(cfg_source, model)
    scheduler = build_lr_scheduler(cfg_source, optimizer)
    checkpointer = DetectionCheckpointer(model, cfg_source.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler)

    start_iter = (checkpointer.resume_or_load(cfg_source.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1)
    max_iter = cfg_source.SOLVER.MAX_ITER

    periodic_checkpointer = PeriodicCheckpointer(checkpointer, cfg_source.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter)
    writers = default_writers(cfg_source.OUTPUT_DIR, max_iter) if comm.is_main_process() else []

    data_loader_source = build_detection_train_loader(cfg_source)
    data_loader_target = build_detection_train_loader(cfg_target) 
    logger.info("Starting training from iteration {}".format(start_iter))

    with EventStorage(start_iter) as storage:
        for data_source, data_target, iteration in zip(data_loader_source, data_loader_target, range(start_iter, max_iter)):
            storage.iter = iteration

            # Custom DA forward: two extra positional arguments beyond the batched
            # inputs; the stock detectron2 forward only accepts batched_inputs,
            # which is what raises "takes 2 positional arguments but 4 were given".
            loss_dict = model(data_source, False, 0.1)
            loss_dict_target = model(data_target, True, 0.1)

            loss_dict["loss_image_d"] += loss_dict_target["loss_image_d"]
            loss_dict["loss_instance_d"] += loss_dict_target["loss_instance_d"]
            #loss_dict["loss_consistency_d"] += loss_dict_target["loss_consistency_d"]

            loss_dict["loss_image_d"] *= 0.5
            loss_dict["loss_instance_d"] *= 0.5
            #loss_dict["loss_consistency_d"] *= 0.5

            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            if iteration - start_iter > 5 and ((iteration + 1) % 20 == 0 or iteration == max_iter - 1):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)

cfg_source = get_cfg()
cfg_source.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_C4_1x.yaml"))
cfg_source.DATASETS.TRAIN = ("Chicago_train",)
cfg_source.DATALOADER.NUM_WORKERS = 2
cfg_source.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_C4_1x.yaml")
cfg_source.SOLVER.IMS_PER_BATCH = 1
cfg_source.SOLVER.BASE_LR = 0.0002
cfg_source.SOLVER.MAX_ITER = 30
cfg_source.INPUT.MIN_SIZE_TRAIN = (600,)
cfg_source.INPUT.MIN_SIZE_TEST = 0
os.makedirs(cfg_source.OUTPUT_DIR, exist_ok=True)
cfg_source.MODEL.ROI_HEADS.NUM_CLASSES = 1
model = build_model(cfg_source)

cfg_target = get_cfg()
cfg_target.DATASETS.TRAIN = ("Austin_train",)
cfg_target.INPUT.MIN_SIZE_TRAIN = (600,)
cfg_target.DATALOADER.NUM_WORKERS = 0
cfg_target.SOLVER.IMS_PER_BATCH = 1

do_train(cfg_source,cfg_target,model)

#PASCAL VOC evaluation
# from detectron2.evaluation import inference_on_dataset, PascalVOCDetectionEvaluator
# from detectron2.data import build_detection_test_loader
# evaluator = PascalVOCDetectionEvaluator("city_testT")
# val_loader = build_detection_test_loader(cfg_source, "city_testT")
# res = inference_on_dataset(model, val_loader, evaluator)
# print(res)

#COCO evaluation
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
evaluator = COCOEvaluator("Austin_test", cfg_source, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg_source, "Austin_test")
inference_on_dataset(model, val_loader, evaluator)

There is only one category in my dataset.

GiovanniPasq commented 1 year ago

Hi, I have tested my code again and it works; the problem is related to your model. Are you sure you replaced the classes? Are you using Google Colab or your PC? If you use Google Colab, the paths are:

../usr/local/lib/python3.7/dist-packages/detectron2/modeling/meta_arch/
../usr/local/lib/python3.7/dist-packages/detectron2/modeling/roi_heads/

NB: the Python folder version may vary depending on the one you have installed. If you use Colab, you need to restart the runtime to update the model before running the training.

If you use your PC, the paths are:

detectron2/modeling/meta_arch/
detectron2/modeling/roi_heads/
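
To see which installation Python actually imports at training time (and therefore which of the folders above contains the files you need to replace), a minimal check is:

import detectron2
# Path of the detectron2 package that is actually imported
print(detectron2.__file__)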

Let me know

HarryKang13 commented 1 year ago

Thanks! I used my PC to train the model, and I replaced the classes in '/home/ubt-346/anaconda3/envs/semi-fcos/lib/python3.8/site-packages/detectron2/modeling/meta_arch/rcnn.py' and '/home/ubt-346/anaconda3/envs/semi-fcos/lib/python3.8/site-packages/detectron2/modeling/roi_heads/roi_heads.py'.

I also created the folder '/home/ubt-346/anaconda3/envs/semi-fcos/lib/python3.8/site-packages/detectron2/modeling/da_modules' and added the 3 corresponding classes.

GiovanniPasq commented 1 year ago

If you used your PC, you should have cloned the detectron2 repo from GitHub. You need to replace the files inside that cloned folder.
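
Something along these lines should work for the copy step (a rough sketch only: the paths are placeholders, so adjust them to where the files actually live in your checkouts):

import shutil

# Placeholder paths, adjust to your machine
DA_REPO = "/path/to/DA-Faster-RCNN"   # this repository
D2_REPO = "/path/to/detectron2"       # detectron2 cloned from GitHub

# Overwrite the stock detectron2 files with the DA versions
shutil.copy(DA_REPO + "/rcnn.py", D2_REPO + "/detectron2/modeling/meta_arch/rcnn.py")
shutil.copy(DA_REPO + "/roi_heads.py", D2_REPO + "/detectron2/modeling/roi_heads/roi_heads.py")
# Plus the da_modules folder mentioned above, if your setup uses it
shutil.copytree(DA_REPO + "/da_modules", D2_REPO + "/detectron2/modeling/da_modules", dirs_exist_ok=True)

After copying, make sure the clone is the installation that actually gets imported (for example by installing it in editable mode), otherwise the replaced files will not be picked up.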

HarryKang13 commented 1 year ago

OK, I will try it again. Thanks!

GiovanniPasq commented 1 year ago

Hello, did you solve your problem? If yes, can I close the issue?