Closed 0tist closed 4 years ago
As I removed the callback .to_fp16(), the error was gone
Yes, because as the stack trace indicated, the error comes from:
"Mixed-precision training requires a GPU, remove the call `to_fp16`"
The rest is just a consequence of that. You can't use `to_fp16`
if you don't have a GPU; this is unrelated to fastprogress.
From the 10_nlp.ipynb, trying to fine-tune the language model:
AssertionError Traceback (most recent call last) /opt/conda/lib/python3.7/site-packages/fastai2/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt) 187 try: --> 188 self._do_begin_fit(n_epoch) 189 for epoch in range(n_epoch):
/opt/conda/lib/python3.7/site-packages/fastai2/learner.py in _do_begin_fit(self, n_epoch) 159 def _do_begin_fit(self, n_epoch): --> 160 self.n_epoch,self.loss = n_epoch,tensor(0.); self('begin_fit') 161
/opt/conda/lib/python3.7/site-packages/fastai2/learner.py in __call__(self, event_name) 123 --> 124 def __call__(self, event_name): L(event_name).map(self._call_one) 125 def _call_one(self, event_name):
/opt/conda/lib/python3.7/site-packages/fastcore/foundation.py in map(self, f, *args, **kwargs) 371 else f.__getitem__) --> 372 return self._new(map(g, self)) 373
/opt/conda/lib/python3.7/site-packages/fastcore/foundation.py in _new(self, items, *args, **kwargs) 322 def _xtra(self): return None --> 323 def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs) 324 def __getitem__(self, idx): return self._get(idx) if is_indexer(idx) else L(self._get(idx), use_list=None)
/opt/conda/lib/python3.7/site-packages/fastcore/foundation.py in __call__(cls, x, *args, **kwargs) 40 ---> 41 res = super().__call__(*((x,) + args), **kwargs) 42 res._newchk = 0
/opt/conda/lib/python3.7/site-packages/fastcore/foundation.py in __init__(self, items, use_list, match, *rest) 313 if (use_list is not None) or not _is_array(items): --> 314 items = list(items) if use_list else _listify(items) 315 if match is not None:
/opt/conda/lib/python3.7/site-packages/fastcore/foundation.py in _listify(o) 249 if isinstance(o, str) or _is_array(o): return [o] --> 250 if is_iter(o): return list(o) 251 return [o]
/opt/conda/lib/python3.7/site-packages/fastcore/foundation.py in __call__(self, *args, **kwargs) 215 fargs = [args[x.i] if isinstance(x, _Arg) else x for x in self.pargs] + args[self.maxi+1:] --> 216 return self.fn(*fargs, **kwargs) 217
/opt/conda/lib/python3.7/site-packages/fastai2/learner.py in _call_one(self, event_name) 126 assert hasattr(event, event_name) --> 127 [cb(event_name) for cb in sort_by_run(self.cbs)] 128
/opt/conda/lib/python3.7/site-packages/fastai2/learner.py in <listcomp>(.0)
126 assert hasattr(event, event_name)
--> 127 [cb(event_name) for cb in sort_by_run(self.cbs)]
128
/opt/conda/lib/python3.7/site-packages/fastai2/callback/core.py in __call__(self, event_name) 23 (self.run_valid and not getattr(self, 'training', False))) ---> 24 if self.run and _run: getattr(self, event_name, noop)() 25 if event_name=='after_fit': self.run=True #Reset self.run to True at each end of fit
/opt/conda/lib/python3.7/site-packages/fastai2/callback/fp16.py in begin_fit(self) 83 def begin_fit(self): ---> 84 assert self.dls.device.type == 'cuda', "Mixed-precision training requires a GPU, remove the call `to_fp16`" 85 if self.learn.opt is None: self.learn.create_opt()

AssertionError: Mixed-precision training requires a GPU, remove the call `to_fp16`
During handling of the above exception, another exception occurred:
AttributeError Traceback (most recent call last)
/opt/conda/lib/python3.7/site-packages/fastprogress/fastprogress.py (frame truncated) 156 ... + self.text --> 157 self.out.update(HTML(self.text)) 158 159 def add_child(self, child):

AttributeError: 'NBMasterBar' object has no attribute 'out'