timeseriesAI / tsai

State-of-the-art Deep Learning library for Time Series and Sequences in Pytorch / fastai
https://timeseriesai.github.io/tsai/
Apache License 2.0

learn.show_results() fails for images created by TSToGADF #811

Open E-Penguin opened 11 months ago

E-Penguin commented 11 months ago

I followed the code in the tutorial for encoding time series as images. When I get to learn.show_results(), it fails. The problem appears to be that the title_color keyword is passed through TensorImage.show() down to matplotlib's ax.imshow(), which does not accept it; a minimal reproduction is sketched below.
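For context, the rejection comes from matplotlib itself rather than from the image data. A minimal sketch that reproduces the same error with plain matplotlib (arbitrary random array, nothing tsai-specific):

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# imshow() forwards unknown keyword arguments to AxesImage, which has no
# 'title_color' property, so this raises:
# AttributeError: AxesImage.set() got an unexpected keyword argument 'title_color'
ax.imshow(np.random.rand(8, 8), title_color='green')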

Error:

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-25-acf158188d1e> in <cell line: 35>()
     74 
     75     #ipdb.set_trace()
---> 76     learn.show_results()
     77 
     78     learn.show_probas()

11 frames
/usr/local/lib/python3.10/dist-packages/matplotlib/artist.py in _update_props(self, props, errfmt)
   1195                     func = getattr(self, f"set_{k}", None)
   1196                     if not callable(func):
-> 1197                         raise AttributeError(
   1198                             errfmt.format(cls=type(self), prop_name=k))
   1199                     ret.append(func(v))

AttributeError: AxesImage.set() got an unexpected keyword argument 'title_color'

debug trace:

> /usr/local/lib/python3.10/dist-packages/matplotlib/artist.py(1197)_update_props()
   1195                     func = getattr(self, f"set_{k}", None)
   1196                     if not callable(func):
-> 1197                         raise AttributeError(
   1198                             errfmt.format(cls=type(self), prop_name=k))
   1199                     ret.append(func(v))

ipdb> w
  <ipython-input-25-acf158188d1e>(76)<cell line: 35>()
     74 
     75     #ipdb.set_trace()
---> 76     learn.show_results()
     77 
     78     learn.show_probas()

  /usr/local/lib/python3.10/dist-packages/fastai/learner.py(334)show_results()
    332         b = dl.one_batch()
    333         _,_,preds = self.get_preds(dl=[b], with_decoded=True)
--> 334         dl.show_results(b, preds, max_n=max_n, **kwargs)
    335 
    336     def show_training_loop(self):

  /usr/local/lib/python3.10/dist-packages/tsai/data/core.py(753)show_results()
    751             title = f'True: {t[i][1]}\nPred: {p[i][1]}'
    752             color = 'green' if t[i][1] == p[i][1] else 'red'
--> 753             t[i][0].show(ctx=ctx, title=title, title_color=color)
    754 
    755     @delegates(plt.subplots)

  /usr/local/lib/python3.10/dist-packages/tsai/data/image.py(50)show()
     48             return
     49         else:
---> 50             TensorImage(self[:3]).show(**kwargs)
     51             return
     52 

  /usr/local/lib/python3.10/dist-packages/fastai/torch_core.py(430)show()
    428     _show_args = ArrayImageBase._show_args
    429     def show(self, ctx=None, **kwargs):
--> 430         return show_image(self, ctx=ctx, **{**self._show_args, **kwargs})
    431 
    432 # %% ../nbs/00_torch_core.ipynb 107

  /usr/local/lib/python3.10/dist-packages/fastai/torch_core.py(78)show_image()
     76     if figsize is None: figsize = (_fig_bounds(im.shape[0]), _fig_bounds(im.shape[1]))
     77     if ax is None: _,ax = plt.subplots(figsize=figsize)
---> 78     ax.imshow(im, **kwargs)
     79     if title is not None: ax.set_title(title)
     80     ax.axis('off')

  /usr/local/lib/python3.10/dist-packages/matplotlib/__init__.py(1442)inner()
   1440     def inner(ax, *args, data=None, **kwargs):
   1441         if data is None:
-> 1442             return func(ax, *map(sanitize_sequence, args), **kwargs)
   1443 
   1444         bound = new_sig.bind(ax, *args, **kwargs)

  /usr/local/lib/python3.10/dist-packages/matplotlib/axes/_axes.py(5658)imshow()
   5656             aspect = mpl.rcParams['image.aspect']
   5657         self.set_aspect(aspect)
-> 5658         im = mimage.AxesImage(self, cmap=cmap, norm=norm,
   5659                               interpolation=interpolation, origin=origin,
   5660                               extent=extent, filternorm=filternorm,

  /usr/local/lib/python3.10/dist-packages/matplotlib/_api/deprecation.py(454)wrapper()
    452                 "parameter will become keyword-only %(removal)s.",
    453                 name=name, obj_type=f"parameter of {func.__name__}()")
--> 454         return func(*args, **kwargs)
    455 
    456     # Don't modify *func*'s signature, as boilerplate.py needs it.

  /usr/local/lib/python3.10/dist-packages/matplotlib/image.py(922)__init__()
    920         self._extent = extent
    921 
--> 922         super().__init__(
    923             ax,
    924             cmap=cmap,

  /usr/local/lib/python3.10/dist-packages/matplotlib/image.py(274)__init__()
    272         self._imcache = None
    273 
--> 274         self._internal_update(kwargs)
    275 
    276     def __str__(self):

  /usr/local/lib/python3.10/dist-packages/matplotlib/artist.py(1223)_internal_update()
   1221         The lack of prenormalization is to maintain backcompatibility.
   1222         """
-> 1223         return self._update_props(
   1224             kwargs, "{cls.__name__}.set() got an unexpected keyword argument "
   1225             "{prop_name!r}")

> /usr/local/lib/python3.10/dist-packages/matplotlib/artist.py(1197)_update_props()
   1195                     func = getattr(self, f"set_{k}", None)
   1196                     if not callable(func):
-> 1197                         raise AttributeError(
   1198                             errfmt.format(cls=type(self), prop_name=k))
   1199                     ret.append(func(v))
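Based on the call chain above (tsai/data/core.py passes title_color through the show() in tsai/data/image.py into fastai's show_image(), which hands any leftover kwargs to ax.imshow()), a possible stop-gap is to strip the offending kwarg and re-apply the colour to the axes title afterwards. This is only a sketch, assuming the class defined in tsai/data/image.py is TSImage and that ctx is the matplotlib Axes, as the trace suggests:

from tsai.data.image import TSImage   # assumption: the class shown at tsai/data/image.py(50) above

_orig_tsimage_show = TSImage.show

def _show_without_title_color(self, **kwargs):
    # Drop the kwarg that AxesImage rejects before it reaches ax.imshow() ...
    title_color = kwargs.pop('title_color', None)
    res = _orig_tsimage_show(self, **kwargs)
    # ... then re-apply the green/red colouring to the title of the Axes we drew on.
    ctx = kwargs.get('ctx', None)
    if title_color is not None and ctx is not None:
        ctx.title.set_color(title_color)
    return res

TSImage.show = _show_without_title_color
learn.show_results()   # should no longer hit the AttributeError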

My code:

from tsai.all import *   # TSNormalize, TSTo* transforms, SlidingWindow, get_splits, TSDatasets, TSDataLoaders, tsimage_learner, xresnet34, ...

bts = [[TSNormalize(), TSToPlot(224)],
       [TSNormalize(), TSToMat(224)],
       [TSNormalize(), TSToGADF(224)],
       [TSNormalize(), TSToGASF(224)],
       [TSNormalize(), TSToMTF(224)],
       [TSNormalize(), TSToRP(224)]]

# window_length, get_x, get_y, data and apply_labels_to_windows are defined elsewhere (not shown)

# Apply sliding window
X, y = SlidingWindow(window_length, get_x=get_x, get_y=get_y)(data)

# Apply labels to each window
y = apply_labels_to_windows(y, window_length)

# Get splits
splits = get_splits(y, valid_size=0.2, stratify=True, shuffle=False, show_plot=False)
tfms = [None, [Categorize()]]

dsets = TSDatasets(X, y, tfms=tfms, splits=splits, inplace=True)
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=64, batch_tfms=bts[2], num_workers=0)  # bts[2] -> TSToGADF

learn = tsimage_learner(dls, xresnet34)

learn.fit_one_cycle(25, lr_max=1e-3)

learn.recorder.plot_metrics()

# --> problem at this step
learn.show_results()

learn.show_probas()

interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
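Until show_results() works, here is a rough alternative sketch for eyeballing predictions on the GADF images, built only from calls that already appear in the trace above (one_batch, get_preds) plus plain matplotlib; using dls.vocab as the label lookup and a 3x3 grid are my assumptions:

import matplotlib.pyplot as plt

b = learn.dls.valid.one_batch()                        # (images, targets), as show_results() does
probas, targs, preds = learn.get_preds(dl=[b], with_decoded=True)

vocab = learn.dls.vocab                                # assumption: class labels live here
fig, axes = plt.subplots(3, 3, figsize=(9, 9))
for img, t, p, ax in zip(b[0], targs, preds, axes.flat):
    # CHW -> HWC, first 3 channels; values may need clipping to [0, 1] for imshow
    ax.imshow(img.cpu().permute(1, 2, 0)[..., :3])
    color = 'green' if t == p else 'red'               # same colouring rule as tsai's show_results
    ax.set_title(f'True: {vocab[int(t)]}\nPred: {vocab[int(p)]}', color=color)
    ax.axis('off')
plt.tight_layout()
plt.show()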