File "/tmp/functorch/functorch/_src/eager_transforms.py", line 254, in vjp
primals_out = func(*diff_primals)
File "/pytorch/benchmarks/functional_autograd_benchmark/audio_text_models.py", line 71, in forward
out, out_sizes = model(inputs, inputs_sizes)
File "/pytorch/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/pytorch/benchmarks/functional_autograd_benchmark/torchaudio_models.py", line 257, in forward
x = rnn(x, output_lengths)
File "/pytorch/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/pytorch/benchmarks/functional_autograd_benchmark/torchaudio_models.py", line 161, in forward
x, h = self.rnn(x)
File "/pytorch/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/pytorch/torch/nn/modules/rnn.py", line 772, in forward
result = _VF.lstm(input, batch_sizes, hx, self._flat_weights, self.bias,
NotImplementedError: Cannot access storage of TensorWrapper
A related issue occurs in functional_autograd_benchmark under functorch (code) when running vjp on the deepspeech model: