zalandoresearch / pytorch-ts

PyTorch based Probabilistic Time Series forecasting framework based on GluonTS backend
MIT License
1.21k stars 190 forks source link

When I try your Quick start, I run into some trouble #35

Open wubmu opened 3 years ago

wubmu commented 3 years ago

BrokenPipeError Traceback (most recent call last)

in 6 trainer=Trainer(epochs=10, 7 device=device)) ----> 8 predictor = estimator.train(training_data=training_data) D:\software\anaconda\envs\tensorflow\lib\site-packages\pts\model\estimator.py in train(self, training_data) 146 147 def train(self, training_data: Dataset) -> Predictor: --> 148 return self.train_model(training_data).predictor D:\software\anaconda\envs\tensorflow\lib\site-packages\pts\model\estimator.py in train_model(self, training_data) 134 net=trained_net, 135 input_names=get_module_forward_input_names(trained_net), --> 136 data_loader=training_data_loader, 137 ) 138 D:\software\anaconda\envs\tensorflow\lib\site-packages\pts\trainer.py in __call__(self, net, input_names, data_loader) 46 47 with tqdm(data_loader) as it: ---> 48 for batch_no, data_entry in enumerate(it, start=1): 49 optimizer.zero_grad() 50 inputs = [data_entry[k].to(self.device) for k in input_names] D:\software\anaconda\envs\tensorflow\lib\site-packages\tqdm\std.py in __iter__(self) 1163 1164 try: -> 1165 for obj in iterable: 1166 yield obj 1167 # Update and possibly print the progressbar. D:\software\anaconda\envs\tensorflow\lib\site-packages\torch\utils\data\dataloader.py in __iter__(self) 289 return _SingleProcessDataLoaderIter(self) 290 else: --> 291 return _MultiProcessingDataLoaderIter(self) 292 293 @property D:\software\anaconda\envs\tensorflow\lib\site-packages\torch\utils\data\dataloader.py in __init__(self, loader) 735 # before it starts, and __del__ tries to join but will get: 736 # AssertionError: can only join a started process. 
--> 737 w.start() 738 self._index_queues.append(index_queue) 739 self._workers.append(w) D:\software\anaconda\envs\tensorflow\lib\multiprocessing\process.py in start(self) 103 'daemonic processes are not allowed to have children' 104 _cleanup() --> 105 self._popen = self._Popen(self) 106 self._sentinel = self._popen.sentinel 107 # Avoid a refcycle if the target function holds an indirect D:\software\anaconda\envs\tensorflow\lib\multiprocessing\context.py in _Popen(process_obj) 221 @staticmethod 222 def _Popen(process_obj): --> 223 return _default_context.get_context().Process._Popen(process_obj) 224 225 class DefaultContext(BaseContext): D:\software\anaconda\envs\tensorflow\lib\multiprocessing\context.py in _Popen(process_obj) 320 def _Popen(process_obj): 321 from .popen_spawn_win32 import Popen --> 322 return Popen(process_obj) 323 324 class SpawnContext(BaseContext): D:\software\anaconda\envs\tensorflow\lib\multiprocessing\popen_spawn_win32.py in __init__(self, process_obj) 63 try: 64 reduction.dump(prep_data, to_child) ---> 65 reduction.dump(process_obj, to_child) 66 finally: 67 set_spawning_popen(None) D:\software\anaconda\envs\tensorflow\lib\multiprocessing\reduction.py in dump(obj, file, protocol) 58 def dump(obj, file, protocol=None): 59 '''Replacement for pickle.dump() using ForkingPickler.''' ---> 60 ForkingPickler(file, protocol).dump(obj) 61 62 # BrokenPipeError: [Errno 32] Broken pipe
kashif commented 3 years ago

Seems to be an issue with multiprocessing on Windows... can you kindly try with num_workers=None?

cmapz2 commented 2 years ago

Thanks, it worked!