Closed tianqihou closed 2 days ago
@tianqihou
I know what that error is! I recently faced the same issue when running my model on a new server. Simply put, try running:
pip install transformers==4.45.1.
The current version of HF transformers has an issue, as I also reported it: github.com/huggingface/transformers/issues/34448#issuecomment-2458341307
Thank you, it works well.
The error message is as follows:
⏰ converting audio: 0m 0s 152.04ms Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/gradio/queueing.py", line 624, in process_events response = await route_utils.call_process_api( File "/usr/local/lib/python3.10/dist-packages/gradio/route_utils.py", line 323, in call_process_api output = await app.get_blocks().process_api( File "/usr/local/lib/python3.10/dist-packages/gradio/blocks.py", line 2015, in process_api result = await self.call_function( File "/usr/local/lib/python3.10/dist-packages/gradio/blocks.py", line 1562, in call_function prediction = await anyio.to_thread.run_sync( # type: ignore File "/usr/local/lib/python3.10/dist-packages/anyio/to_thread.py", line 33, in run_sync return await get_asynclib().run_sync_in_worker_thread( File "/usr/local/lib/python3.10/dist-packages/anyio/_backends/_asyncio.py", line 877, in run_sync_in_worker_thread return await future File "/usr/local/lib/python3.10/dist-packages/anyio/_backends/_asyncio.py", line 807, in run result = context.run(func, *args) File "/usr/local/lib/python3.10/dist-packages/gradio/utils.py", line 865, in wrapper response = f(*args, **kwargs) File "", line 67, in process_audio
midifile = transcribe(model, audio_info)
File "", line 135, in transcribe
pred_token_arr, _ = model.inference_file(bsz=8, audio_segments=audio_segments)
File "/content/amt/src/model/ymt3.py", line 566, in inference_file
preds = self.inference(x, task_tokens).detach().cpu().numpy()
File "/content/amt/src/model/ymt3.py", line 492, in inference
pred_ids = task_cond_dec_generate(decoder=self.decoder,
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
File "/content/amt/src/model/t5mod_helper.py", line 82, in task_cond_dec_generate
dec_hs, past_key_values = decoder(inputs_embeds=dec_inputs_embeds,
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "/content/amt/src/model/t5mod.py", line 500, in forward
return self._forward_no_compile(**kwargs)
File "/content/amt/src/model/t5mod.py", line 503, in _forward_no_compile
return self._forward(**kwargs)
File "/content/amt/src/model/t5mod.py", line 531, in _forward
decoder_outputs = self.decoder(
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "/content/amt/src/model/t5mod.py", line 340, in forward
layer_outputs = layer_module(
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "/content/amt/src/model/t5mod.py", line 98, in forward
self_attention_outputs = self.layer[0](
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/transformers/models/t5/modeling_t5.py", line 593, in forward
attention_output = self.SelfAttention(
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/transformers/models/t5/modeling_t5.py", line 525, in forward
real_seq_length = query_length if query_length is not None else cache_position[-1] + 1
TypeError: 'NoneType' object is not subscriptable