Closed — radekosmulski closed this issue 4 years ago
Is this fixed by the no_grad Jeremy added?
It fixes part of the issue - get_preds is no longer memory hungry :)
Unfortunately the error remains:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-19-4975c3720d00> in <module>
----> 1 preds = learn.get_preds(dl=tst_dl)
~/work/fastai_dev/dev/local/learner.py in get_preds(self, ds_idx, dl, with_input, with_loss, decoded, act)
266 with self.no_logging(), self.added_cbs(cb), self.loss_not_reduced():
267 self(_before_epoch)
--> 268 self._do_epoch_validate(ds_idx, dl)
269 self(_after_epoch)
270 if act is None: act = getattr(self.loss_func, 'activation', noop)
~/work/fastai_dev/dev/local/learner.py in _do_epoch_validate(self, ds_idx, dl)
232 with torch.no_grad(): self.all_batches()
233 except CancelValidException: self('after_cancel_validate')
--> 234 finally: self('after_validate')
235
236 def fit(self, n_epoch, lr=None, wd=defaults.wd, cbs=None, reset_opt=False):
~/work/fastai_dev/dev/local/learner.py in __call__(self, event_name)
181 self.remove_cbs(cbs)
182
--> 183 def __call__(self, event_name): L(event_name).map(self._call_one)
184 def _call_one(self, event_name):
185 assert hasattr(event, event_name)
~/work/fastai_dev/dev/local/core.py in map(self, f, *args, **kwargs)
333 else f.format if isinstance(f,str)
334 else f.__getitem__)
--> 335 return self._new(map(g, self))
336
337 def unique(self): return L(dict.fromkeys(self).keys())
~/work/fastai_dev/dev/local/core.py in _new(self, items, *args, **kwargs)
287 super().__init__(items)
288
--> 289 def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs)
290 def __getitem__(self, idx): return self._get(idx) if is_indexer(idx) else L(self._get(idx), use_list=None)
291
~/work/fastai_dev/dev/local/core.py in __call__(cls, x, *args, **kwargs)
48 return x
49
---> 50 res = super().__call__(*((x,) + args), **kwargs)
51 res._newchk = 0
52 return res
~/work/fastai_dev/dev/local/core.py in __init__(self, items, use_list, match, *rest)
281 if items is None: items = []
282 if (use_list is not None) or not _is_array(items):
--> 283 items = list(items) if use_list else _listify(items)
284 if match is not None:
285 if len(items)==1: items = items*len(match)
~/work/fastai_dev/dev/local/core.py in _listify(o)
225 if isinstance(o, list): return o
226 if isinstance(o, str) or _is_array(o): return [o]
--> 227 if is_iter(o): return list(o)
228 return [o]
229
~/work/fastai_dev/dev/local/core.py in __call__(self, *args, **kwargs)
197 if isinstance(v,_Arg): kwargs[k] = args.pop(v.i)
198 fargs = [args[x.i] if isinstance(x, _Arg) else x for x in self.pargs] + args[self.maxi+1:]
--> 199 return self.fn(*fargs, **kwargs)
200
201 #Cell
~/work/fastai_dev/dev/local/learner.py in _call_one(self, event_name)
184 def _call_one(self, event_name):
185 assert hasattr(event, event_name)
--> 186 [cb(event_name) for cb in sort_by_run(self.cbs)]
187
188 def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)
~/work/fastai_dev/dev/local/learner.py in <listcomp>(.0)
184 def _call_one(self, event_name):
185 assert hasattr(event, event_name)
--> 186 [cb(event_name) for cb in sort_by_run(self.cbs)]
187
188 def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)
~/work/fastai_dev/dev/local/learner.py in __call__(self, event_name)
26 def __call__(self, event_name):
27 "Call `self.{event_name}` if it's defined"
---> 28 getattr(self, event_name, noop)()
29
30 @property
~/work/fastai_dev/dev/local/learner.py in after_validate(self)
450 def begin_validate(self): self._valid_mets.map(Self.reset())
451 def after_train (self): self.log += self._train_mets.map(_maybe_item)
--> 452 def after_validate(self): self.log += self._valid_mets.map(_maybe_item)
453 def after_cancel_train(self): self.cancel_train = True
454 def after_cancel_validate(self): self.cancel_valid = True
~/work/fastai_dev/dev/local/core.py in map(self, f, *args, **kwargs)
333 else f.format if isinstance(f,str)
334 else f.__getitem__)
--> 335 return self._new(map(g, self))
336
337 def unique(self): return L(dict.fromkeys(self).keys())
~/work/fastai_dev/dev/local/core.py in _new(self, items, *args, **kwargs)
287 super().__init__(items)
288
--> 289 def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs)
290 def __getitem__(self, idx): return self._get(idx) if is_indexer(idx) else L(self._get(idx), use_list=None)
291
~/work/fastai_dev/dev/local/core.py in __call__(cls, x, *args, **kwargs)
48 return x
49
---> 50 res = super().__call__(*((x,) + args), **kwargs)
51 res._newchk = 0
52 return res
~/work/fastai_dev/dev/local/core.py in __init__(self, items, use_list, match, *rest)
281 if items is None: items = []
282 if (use_list is not None) or not _is_array(items):
--> 283 items = list(items) if use_list else _listify(items)
284 if match is not None:
285 if len(items)==1: items = items*len(match)
~/work/fastai_dev/dev/local/core.py in _listify(o)
225 if isinstance(o, list): return o
226 if isinstance(o, str) or _is_array(o): return [o]
--> 227 if is_iter(o): return list(o)
228 return [o]
229
~/work/fastai_dev/dev/local/core.py in __call__(self, *args, **kwargs)
197 if isinstance(v,_Arg): kwargs[k] = args.pop(v.i)
198 fargs = [args[x.i] if isinstance(x, _Arg) else x for x in self.pargs] + args[self.maxi+1:]
--> 199 return self.fn(*fargs, **kwargs)
200
201 #Cell
~/work/fastai_dev/dev/local/learner.py in _maybe_item(t)
409
410 def _maybe_item(t):
--> 411 t = t.value
412 return t.item() if isinstance(t, Tensor) and t.numel()==1 else t
413
~/work/fastai_dev/dev/local/metrics.py in value(self)
40 @property
41 def value(self):
---> 42 preds,targs = torch.cat(self.preds),torch.cat(self.targs)
43 if self.to_np: preds,targs = preds.numpy(),targs.numpy()
44 return self.func(targs, preds, **self.kwargs) if self.invert_args else self.func(preds, targs, **self.kwargs)
RuntimeError: expected a non-empty list of Tensors
Should be fixed now.
Memory consumption is high regardless of which dl I pass. I am using the following list of metrics:
[accuracy_multi, PrecisionMulti(), RecallMulti()]
This is the error I receive: