<---- Training Params ---->
Namespace(RFB_aggregated_channel=[32, 64, 128], action='train', arch='7', aug_ver=1, batch_size=8, channels=[24, 40, 112, 320], clipping=2, criterion='API', data_path='/home/nagas/TRACER/data', dataset='DUTS', denoise=0.93, epochs=100, exp_num=0, frequency_radius=16, gamma=0.1, img_size=640, lr=5e-05, lr_factor=0.1, model_path='/home/nagas/TRACER/results', multi_gpu=True, num_workers=4, optimizer='Adam', patience=5, save_map=None, scheduler='Reduce', seed=42, weight_decay=0.0001)
train length : 42
val length : 3
Loaded pretrained weights for efficientnet-b7
0%| | 0/5 [00:00<?, ?it/s]ERROR: Unexpected segmentation fault encountered in worker.
ERROR: Unexpected segmentation fault encountered in worker.
ERROR: Unexpected segmentation fault encountered in worker.
ERROR: Unexpected segmentation fault encountered in worker.
0%| | 0/5 [00:00<?, ?it/s]
Traceback (most recent call last):
File "/home/nagas/miniconda3/envs/tracer/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 990, in _try_get_data
data = self._data_queue.get(timeout=timeout)
File "/home/nagas/miniconda3/envs/tracer/lib/python3.7/multiprocessing/queues.py", line 104, in get
if not self._poll(timeout):
File "/home/nagas/miniconda3/envs/tracer/lib/python3.7/multiprocessing/connection.py", line 257, in poll
return self._poll(timeout)
File "/home/nagas/miniconda3/envs/tracer/lib/python3.7/multiprocessing/connection.py", line 414, in _poll
r = wait([self], timeout)
File "/home/nagas/miniconda3/envs/tracer/lib/python3.7/multiprocessing/connection.py", line 921, in wait
ready = selector.select(timeout)
File "/home/nagas/miniconda3/envs/tracer/lib/python3.7/selectors.py", line 415, in select
fd_event_list = self._selector.poll(timeout)
File "/home/nagas/miniconda3/envs/tracer/lib/python3.7/site-packages/torch/utils/data/_utils/signal_handling.py", line 66, in handler
_error_if_any_worker_fails()
RuntimeError: DataLoader worker (pid 20604) is killed by signal: Segmentation fault.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "main.py", line 56, in
main(args)
File "main.py", line 35, in main
Trainer(args, save_path)
File "/home/nagas/TRACER/trainer.py", line 56, in init
train_loss, train_mae = self.training(args)
File "/home/nagas/TRACER/trainer.py", line 101, in training
for images, masks, edges in tqdm(self.train_loader):
File "/home/nagas/miniconda3/envs/tracer/lib/python3.7/site-packages/tqdm/std.py", line 1185, in iter
for obj in iterable:
File "/home/nagas/miniconda3/envs/tracer/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 521, in next
data = self._next_data()
File "/home/nagas/miniconda3/envs/tracer/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 1186, in _next_data
idx, data = self._get_data()
File "/home/nagas/miniconda3/envs/tracer/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 1152, in _get_data
success, data = self._try_get_data()
File "/home/nagas/miniconda3/envs/tracer/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 1003, in _try_get_data
raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str)) from e
RuntimeError: DataLoader worker (pid(s) 20604) exited unexpectedly
Hi @Karel911, I'm getting the error above as soon as training starts. Do you have any idea what might be causing the segmentation faults in the DataLoader workers?
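For what it's worth, my next debugging step is to force single-process data loading so that whatever is crashing inside the workers is raised directly in the main process. Below is a minimal, self-contained sketch of that idea (the TensorDataset is only a stand-in for the real TRACER dataset, and the sizes just mirror my run; the relevant part is num_workers=0 on the DataLoader):

# Minimal sketch (not the actual TRACER code): a dummy dataset with the same
# image size / batch size as my run, iterated with num_workers=0 so any
# exception in the loading pipeline is raised in the main process instead of
# killing a worker with a segmentation fault.
import torch
from torch.utils.data import DataLoader, TensorDataset

dummy = TensorDataset(
    torch.randn(8, 3, 640, 640),   # stand-in images
    torch.randn(8, 1, 640, 640),   # stand-in masks
)

loader = DataLoader(
    dummy,
    batch_size=8,
    shuffle=True,
    num_workers=0,   # single-process loading for debugging
    pin_memory=True,
)

for images, masks in loader:
    print(images.shape, masks.shape)
    break

If the run gets past the first batch once num_workers is 0, the crash is specific to the worker processes; in that case I would also try calling cv2.setNumThreads(0) before creating the loaders, since OpenCV's internal threading is a common cause of segfaults in PyTorch DataLoader workers.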