zalandoresearch / pytorch-ts

PyTorch based Probabilistic Time Series forecasting framework based on GluonTS backend
MIT License
1.21k stars 191 forks source link

Implicit-Quantile-Network-Example.ipynb fails #100

Open david-waterworth opened 2 years ago

david-waterworth commented 2 years ago

I installed pytorch-ts by checking out the repo and installing via pip install -e . after installing PyTorch 1.9.1+cu111

I get the following error trying to run the Implicit-Quantile-Network-Example.ipynb notebook

predictor = estimator.train(dataset.train, num_workers=8)

RuntimeError: input.size(-1) must be equal to input_size. Expected 62, got 63

The full traceback is attached below


RuntimeError Traceback (most recent call last) /home/dave/dev/pytorch-ts/examples/Implicit-Quantile-Network-Example.ipynb Cell 7' in <cell line: 1>() ----> 1 predictor = estimator.train(dataset.train, num_workers=8)

File ~/dev/pytorch-ts/pts/model/estimator.py:179, in PyTorchEstimator.train(self, training_data, validation_data, num_workers, prefetch_factor, shuffle_buffer_length, cache_data, kwargs) 169 def train( 170 self, 171 training_data: Dataset, (...) 177 kwargs, 178 ) -> PyTorchPredictor: --> 179 return self.train_model( 180 training_data, 181 validation_data, 182 num_workers=num_workers, 183 prefetch_factor=prefetch_factor, 184 shuffle_buffer_length=shuffle_buffer_length, 185 cache_data=cache_data, 186 **kwargs, 187 ).predictor

File ~/dev/pytorch-ts/pts/model/estimator.py:151, in PyTorchEstimator.train_model(self, training_data, validation_data, num_workers, prefetch_factor, shuffle_buffer_length, cache_data, kwargs) 133 validation_iter_dataset = TransformedIterableDataset( 134 dataset=validation_data, 135 transform=transformation (...) 139 cache_data=cache_data, 140 ) 141 validation_data_loader = DataLoader( 142 validation_iter_dataset, 143 batch_size=self.trainer.batch_size, (...) 148 kwargs, 149 ) --> 151 self.trainer( 152 net=trained_net, 153 train_iter=training_data_loader, 154 validation_iter=validation_data_loader, 155 ) 157 return TrainOutput( 158 transformation=transformation, 159 trained_net=trained_net, (...) 162 ), 163 )

File ~/dev/pytorch-ts/pts/trainer.py:67, in Trainer.__call__(self, net, train_iter, validation_iter) 64 optimizer.zero_grad() 66 inputs = [v.to(self.device) for v in data_entry.values()] ---> 67 output = net(*inputs) 69 if isinstance(output, (list, tuple)): 70 loss = output[0]

File ~/dev/pytorch-ts/.venv/lib/python3.8/site-packages/torch/nn/modules/module.py:1051, in Module._call_impl(self, *input, **kwargs) 1047 # If we don't have any hooks, we want to skip the rest of the logic in 1048 # this function, and just call forward. 1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks 1050 or _global_forward_hooks or _global_forward_pre_hooks): -> 1051 return forward_call(*input, **kwargs) 1052 # Do not call functions when jit is used 1053 full_backward_hooks, non_full_backward_hooks = [], []

File ~/dev/pytorch-ts/pts/model/deepar/deepar_network.py:246, in DeepARTrainingNetwork.forward(self, feat_static_cat, feat_static_real, past_time_feat, past_target, past_observed_values, future_time_feat, future_target, future_observed_values) 235 def forward( 236 self, 237 feat_static_cat: torch.Tensor, (...) 244 future_observed_values: torch.Tensor, 245 ) -> torch.Tensor: --> 246 distr = self.distribution( 247 feat_static_cat=feat_static_cat, 248 feat_static_real=feat_static_real, 249 past_time_feat=past_time_feat, 250 past_target=past_target, 251 past_observed_values=past_observed_values, 252 future_time_feat=future_time_feat, 253 future_target=future_target, 254 future_observed_values=future_observed_values, 255 ) 257 # put together target sequence 258 # (batch_size, seq_len, *target_shape) 259 target = torch.cat( 260 ( 261 past_target[:, self.history_length - self.context_length :, ...], (...) 264 dim=1, 265 )

File ~/dev/pytorch-ts/pts/model/deepar/deepar_network.py:221, in DeepARTrainingNetwork.distribution(self, feat_static_cat, feat_static_real, past_time_feat, past_target, past_observed_values, future_time_feat, future_target, future_observed_values) 210 def distribution( 211 self, 212 feat_static_cat: torch.Tensor, (...) 219 future_observed_values: torch.Tensor, 220 ) -> Distribution: --> 221 rnn_outputs, _, scale, _ = self.unroll_encoder( 222 feat_static_cat=feat_static_cat, 223 feat_static_real=feat_static_real, 224 past_time_feat=past_time_feat, 225 past_target=past_target, 226 past_observed_values=past_observed_values, 227 future_time_feat=future_time_feat, 228 future_target=future_target, 229 ) 231 distr_args = self.proj_distr_args(rnn_outputs) 233 return self.distr_output.distribution(distr_args, scale=scale)

File ~/dev/pytorch-ts/pts/model/deepar/deepar_network.py:200, in DeepARNetwork.unroll_encoder(self, feat_static_cat, feat_static_real, past_time_feat, past_target, past_observed_values, future_time_feat, future_target) 197 inputs = torch.cat((input_lags, time_feat, repeated_static_feat), dim=-1) 199 # unroll encoder --> 200 outputs, state = self.rnn(inputs) 202 # outputs: (batch_size, seq_len, num_cells) 203 # state: list of (num_layers, batch_size, num_cells) tensors 204 # scale: (batch_size, 1, *target_shape) 205 # static_feat: (batch_size, num_features + prod(target_shape)) 206 return outputs, state, scale, static_feat

File ~/dev/pytorch-ts/.venv/lib/python3.8/site-packages/torch/nn/modules/module.py:1051, in Module._call_impl(self, *input, **kwargs) 1047 # If we don't have any hooks, we want to skip the rest of the logic in 1048 # this function, and just call forward. 1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks 1050 or _global_forward_hooks or _global_forward_pre_hooks): -> 1051 return forward_call(*input, **kwargs) 1052 # Do not call functions when jit is used 1053 full_backward_hooks, non_full_backward_hooks = [], []

File ~/dev/pytorch-ts/.venv/lib/python3.8/site-packages/torch/nn/modules/rnn.py:835, in GRU.forward(self, input, hx) 830 else: 831 # Each batch of the hidden state should match the input sequence that 832 # the user believes he/she is passing in. 833 hx = self.permute_hidden(hx, sorted_indices) --> 835 self.check_forward_args(input, hx, batch_sizes) 836 if batch_sizes is None: 837 result = _VF.gru(input, hx, self._flat_weights, self.bias, self.num_layers, 838 self.dropout, self.training, self.bidirectional, self.batch_first)

File ~/dev/pytorch-ts/.venv/lib/python3.8/site-packages/torch/nn/modules/rnn.py:229, in RNNBase.check_forward_args(self, input, hidden, batch_sizes) 228 def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]): --> 229 self.check_input(input, batch_sizes) 230 expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes) 232 self.check_hidden_size(hidden, expected_hidden_size)

File ~/dev/pytorch-ts/.venv/lib/python3.8/site-packages/torch/nn/modules/rnn.py:205, in RNNBase.check_input(self, input, batch_sizes) 201 raise RuntimeError( 202 'input must have {} dimensions, got {}'.format( 203 expected_input_dim, input.dim())) 204 if self.input_size != input.size(-1): --> 205 raise RuntimeError( 206 'input.size(-1) must be equal to input_size. Expected {}, got {}'.format( 207 self.input_size, input.size(-1)))

kashif commented 2 years ago

thank you! I will fix it! In the meantime can you set the input_size to 63 and try?