I'm facing the following issue after changing DATASETS.DETECT_CLASSES to 10 classes.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [1,0,0], thread: [111,0,0] Assertion index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [0,0,0], thread: [57,0,0] Assertion index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [0,0,0], thread: [58,0,0] Assertion index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [0,0,0], thread: [59,0,0] Assertion index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [0,0,0], thread: [60,0,0] Assertion index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [0,0,0], thread: [61,0,0] Assertion index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [0,0,0], thread: [62,0,0] Assertion index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
Traceback (most recent call last):
File "tools/plain_train_net.py", line 104, in <module>
args=(args,),
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/engine/launch.py", line 56, in launch
main_func(args)
File "tools/plain_train_net.py", line 92, in main
train(cfg, model, device, distributed)
File "tools/plain_train_net.py", line 55, in train
arguments
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/engine/trainer.py", line 69, in do_train
loss_dict = model(images, targets)
File "/home/zfe5szh/.conda/envs/SMOKE/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/modeling/detector/keypoint_detector.py", line 38, in forward
result, detector_losses = self.heads(features, targets)#############################################
File "/home/zfe5szh/.conda/envs/SMOKE/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/modeling/heads/smoke_head/smoke_head.py", line 22, in forward
loss_heatmap, loss_regression = self.loss_evaluator(x, targets)
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/modeling/heads/smoke_head/loss.py", line 117, in call
predict_boxes3d = self.prepare_predictions(targets_variables, pred_regression)
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/modeling/heads/smoke_head/loss.py", line 79, in prepare_predictions
targets_variables["flip_mask"]
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/modeling/smoke_coder.py", line 218, in decode_orientation
cos_pos_idx = (vector_ori[:, 1] > 0).nonzero()
RuntimeError: copy_if failed to synchronize: device-side assert triggered
I'm facing the following issue after changing DATASETS.DETECT_CLASSES to 10 classes.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [1,0,0], thread: [111,0,0] Assertion
args=(args,),
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/engine/launch.py", line 56, in launch
main_func(args)
File "tools/plain_train_net.py", line 92, in main
train(cfg, model, device, distributed)
File "tools/plain_train_net.py", line 55, in train
arguments
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/engine/trainer.py", line 69, in do_train
loss_dict = model(images, targets)
File "/home/zfe5szh/.conda/envs/SMOKE/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/modeling/detector/keypoint_detector.py", line 38, in forward
result, detector_losses = self.heads(features, targets)#############################################
File "/home/zfe5szh/.conda/envs/SMOKE/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/modeling/heads/smoke_head/smoke_head.py", line 22, in forward
loss_heatmap, loss_regression = self.loss_evaluator(x, targets)
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/modeling/heads/smoke_head/loss.py", line 117, in call
predict_boxes3d = self.prepare_predictions(targets_variables, pred_regression)
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/modeling/heads/smoke_head/loss.py", line 79, in prepare_predictions
targets_variables["flip_mask"]
File "/fs/scratch/.xcserver_ai-initiative_backup2021/zfe5szh/SMOKE/smoke/modeling/smoke_coder.py", line 218, in decode_orientation
cos_pos_idx = (vector_ori[:, 1] > 0).nonzero()
RuntimeError: copy_if failed to synchronize: device-side assert triggered
index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [0,0,0], thread: [57,0,0] Assertion index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [0,0,0], thread: [58,0,0] Assertion index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [0,0,0], thread: [59,0,0] Assertion index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [0,0,0], thread: [60,0,0] Assertion index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [0,0,0], thread: [61,0,0] Assertion index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:60: lambda ->auto::operator()(int)->auto: block: [0,0,0], thread: [62,0,0] Assertion index >= -sizes[i] && index < sizes[i] && "index out of bounds" failed.
Traceback (most recent call last):
  File "tools/plain_train_net.py", line 104, in <module>