zalandoresearch / pytorch-ts

PyTorch-based probabilistic time series forecasting framework built on the GluonTS backend
MIT License

Readme example not working #1

Closed · StatMixedML closed this issue 4 years ago

StatMixedML commented 4 years ago

Description

Many thanks to the authors for making the implementation available! Great initiative.

I am trying to run the README.md example, but it is not working.

Code Snippet

import torch
from pts import Trainer
from pts.model.deepar import DeepAREstimator

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

estimator = DeepAREstimator(freq="5min",
                            prediction_length=12,
                            input_size=43,
                            trainer=Trainer(epochs=10,
                                            device=device))
# training_data is the ListDataset built earlier in the README
predictor = estimator.train(training_data=training_data)

Error

Running the code in the README up to this snippet works just fine. When I call estimator.train(training_data=training_data), the snippet above throws the following error:

0it [00:02, ?it/s]
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-21-57e06c572bad> in <module>
      6                             trainer=Trainer(epochs=10,
      7                                             device=device))
----> 8 predictor = estimator.train(training_data=training_data)

C:\ProgramData\Anaconda3\envs\pytorchts\lib\site-packages\pts\model\estimator.py in train(self, training_data)
    133 
    134     def train(self, training_data: Dataset) -> Predictor:
--> 135         return self.train_model(training_data).predictor

C:\ProgramData\Anaconda3\envs\pytorchts\lib\site-packages\pts\model\estimator.py in train_model(self, training_data)
    118         trained_net = self.create_training_network(self.trainer.device)
    119 
--> 120         self.trainer(
    121             net=trained_net,
    122             input_names=get_module_forward_input_names(trained_net),

C:\ProgramData\Anaconda3\envs\pytorchts\lib\site-packages\pts\trainer.py in __call__(self, net, input_names, data_loader)
     50                     inputs = [data_entry[k].to(self.device) for k in input_names]
     51 
---> 52                     output = net(*inputs)
     53                     if isinstance(output, (list, tuple)):
     54                         loss = output[0]

C:\ProgramData\Anaconda3\envs\pytorchts\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    530             result = self._slow_forward(*input, **kwargs)
    531         else:
--> 532             result = self.forward(*input, **kwargs)
    533         for hook in self._forward_hooks.values():
    534             hook_result = hook(self, input, result)

C:\ProgramData\Anaconda3\envs\pytorchts\lib\site-packages\pts\model\deepar\deepar_network.py in forward(self, feat_static_cat, feat_static_real, past_time_feat, past_target, past_observed_values, future_time_feat, future_target, future_observed_values)
    244         future_observed_values: torch.Tensor,
    245     ) -> torch.Tensor:
--> 246         distr = self.distribution(
    247             feat_static_cat=feat_static_cat,
    248             feat_static_real=feat_static_real,

C:\ProgramData\Anaconda3\envs\pytorchts\lib\site-packages\pts\model\deepar\deepar_network.py in distribution(self, feat_static_cat, feat_static_real, past_time_feat, past_target, past_observed_values, future_time_feat, future_target, future_observed_values)
    219         future_observed_values: torch.Tensor,
    220     ) -> Distribution:
--> 221         rnn_outputs, _, scale, _ = self.unroll_encoder(
    222             feat_static_cat=feat_static_cat,
    223             feat_static_real=feat_static_real,

C:\ProgramData\Anaconda3\envs\pytorchts\lib\site-packages\pts\model\deepar\deepar_network.py in unroll_encoder(self, feat_static_cat, feat_static_real, past_time_feat, past_target, past_observed_values, future_time_feat, future_target)
    166 
    167         # (batch_size, num_features)
--> 168         embedded_cat = self.embedder(feat_static_cat)
    169 
    170         # in addition to embedding features, use the log scale as it can help

C:\ProgramData\Anaconda3\envs\pytorchts\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    530             result = self._slow_forward(*input, **kwargs)
    531         else:
--> 532             result = self.forward(*input, **kwargs)
    533         for hook in self._forward_hooks.values():
    534             hook_result = hook(self, input, result)

C:\ProgramData\Anaconda3\envs\pytorchts\lib\site-packages\pts\modules\feature.py in forward(self, features)
     28 
     29         return torch.cat(
---> 30             [
     31                 embed(cat_feature_slice.squeeze(-1))
     32                 for embed, cat_feature_slice in zip(

C:\ProgramData\Anaconda3\envs\pytorchts\lib\site-packages\pts\modules\feature.py in <listcomp>(.0)
     29         return torch.cat(
     30             [
---> 31                 embed(cat_feature_slice.squeeze(-1))
     32                 for embed, cat_feature_slice in zip(
     33                     self.__embedders, cat_feature_slices

C:\ProgramData\Anaconda3\envs\pytorchts\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    530             result = self._slow_forward(*input, **kwargs)
    531         else:
--> 532             result = self.forward(*input, **kwargs)
    533         for hook in self._forward_hooks.values():
    534             hook_result = hook(self, input, result)

C:\ProgramData\Anaconda3\envs\pytorchts\lib\site-packages\torch\nn\modules\sparse.py in forward(self, input)
    110 
    111     def forward(self, input):
--> 112         return F.embedding(
    113             input, self.weight, self.padding_idx, self.max_norm,
    114             self.norm_type, self.scale_grad_by_freq, self.sparse)

C:\ProgramData\Anaconda3\envs\pytorchts\lib\site-packages\torch\nn\functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
   1482         # remove once script supports set_grad_enabled
   1483         _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 1484     return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
   1485 
   1486 

RuntimeError: Expected tensor for argument #1 'indices' to have scalar type Long; but got torch.cuda.IntTensor instead (while checking arguments for embedding)
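
For context, this error reproduces outside of pts: in the PyTorch version used here, nn.Embedding only accepts int64 (Long) indices, and an int32 index tensor triggers exactly this message. A minimal sketch, forcing int32 so it reproduces on any platform:

import numpy as np
import torch
import torch.nn as nn

embed = nn.Embedding(num_embeddings=10, embedding_dim=4)

# torch.from_numpy preserves the NumPy dtype, so this yields a
# torch.IntTensor rather than the LongTensor that embedding expects.
idx = torch.from_numpy(np.array([0, 1, 2], dtype=np.int32))

try:
    embed(idx)
except RuntimeError as e:
    print(e)  # Expected tensor ... scalar type Long; but got torch.IntTensor ...

print(embed(idx.long()).shape)  # casting the indices to int64 works: torch.Size([3, 4])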

Environment

kashif commented 4 years ago

Thanks @StatMixedML, trying to reproduce... Can you try to train with device="cpu"? I could not reproduce it on Linux or Mac, so it might be a Windows thing... Can you pull again and try with the commit I just pushed?
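
For reference, the suggested CPU run only swaps the device argument in the README snippet above:

# Same estimator as in the README example, but trained on CPU:
estimator = DeepAREstimator(freq="5min",
                            prediction_length=12,
                            input_size=43,
                            trainer=Trainer(epochs=10,
                                            device="cpu"))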

StatMixedML commented 4 years ago

@kashif Thanks for the quick reply.

I have re-created my environment using your latest commit and also set device="cpu". Same error:

0it [00:02, ?it/s]

[Traceback identical to the CUDA run above, except that the final error now reports a CPU tensor:]

RuntimeError: Expected tensor for argument #1 'indices' to have scalar type Long; but got torch.IntTensor instead (while checking arguments for embedding)

It indeed seems to be a Windows-related issue, since it works in my Linux subsystem.
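
A plausible explanation for the platform dependence: NumPy's default integer type is C `long`, which is 64-bit on Linux/macOS but 32-bit on Windows, so integer arrays created without an explicit dtype become int32 there and keep that dtype when converted to torch tensors. A quick check:

import numpy as np
import torch

# Default integer dtype differs by platform: int64 on Linux/macOS,
# int32 on Windows (NumPy's default int is C `long`).
a = np.array([0, 1, 2])
print(a.dtype)                     # int64 on Linux/macOS, int32 on Windows

# torch.from_numpy keeps the dtype, so on Windows the categorical
# feature indices arrive at nn.Embedding as a torch.IntTensor.
print(torch.from_numpy(a).dtype)   # torch.int64 vs torch.int32

# Generic fix: cast indices to int64 before the embedding lookup.
print(torch.from_numpy(a).long().dtype)  # torch.int64 everywhere

This matches the tracebacks above: the indices tensor reaching F.embedding is an IntTensor only on Windows.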