Open k4kakka opened 10 months ago
Are you using the default model?
On Thu, Jan 4, 2024, 8:53 PM k4kakka @.***> wrote:
) Traceback (most recent call last): File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\gradio\routes.py", line 437, in run_predict output = await app.get_blocks().process_api( File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\gradio\blocks.py", line 1352, in process_api result = await self.call_function( File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\gradio\blocks.py", line 1093, in call_function prediction = await utils.async_iteration(iterator) File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\gradio\utils.py", line 341, in async_iteration return await iterator.__anext__() File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\gradio\utils.py", line 334, in __anext__ return await anyio.to_thread.run_sync( File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\anyio\to_thread.py", line 33, in run_sync return await get_asynclib().run_sync_in_worker_thread( File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\anyio\_backends\_asyncio.py", line 877, in run_sync_in_worker_thread return await future File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\anyio\_backends\_asyncio.py", line 807, in run result = context.run(func, *args) File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\gradio\utils.py", line 317, in run_sync_iterator_async return next(iterator) File "C:\Applications\TTS\oobabooga_windows\tts-generation-webui\src\tortoise\generation_tab_tortoise.py", line 165, in gen yield from generate_tortoise_long( File "C:\Applications\TTS\oobabooga_windows\tts-generation-webui\src\tortoise\gen_tortoise.py", line 199, in generate_tortoise_long datas = generate_tortoise( File "C:\Applications\TTS\oobabooga_windows\tts-generation-webui\src\tortoise\gen_tortoise.py", line 110, in generate_tortoise tts = get_tts() File 
"C:\Applications\TTS\oobabooga_windows\tts-generation-webui\src\tortoise\gen_tortoise.py", line 84, in get_tts MODEL = TextToSpeech( File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\tortoise\api.py", line 247, in __init__ self.autoregressive.load_state_dict(torch.load(get_model_path('autoregressive.pth', models_dir)), strict=False) File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\torch\serialization.py", line 797, in load with _open_zipfile_reader(opened_file) as opened_zipfile: File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\torch\serialization.py", line 283, in __init__ super().__init__(torch._C.PyTorchFileReader(name_or_buffer)) RuntimeError: PytorchStreamReader failed reading zip archive: failed finding central directory
— Reply to this email directly, view it on GitHub https://github.com/rsxdalv/tts-generation-webui/issues/240, or unsubscribe https://github.com/notifications/unsubscribe-auth/ABTRXI3EQSXHSANCVYWAJ2DYM33CRAVCNFSM6AAAAABBNLYGMGVHI2DSMVQWIX3LMV43ASLTON2WKOZSGA3DMMJXGQZTAOI . You are receiving this because you are subscribed to this thread.Message ID: @.***>
) Traceback (most recent call last): File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\gradio\routes.py", line 437, in run_predict output = await app.get_blocks().process_api( File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\gradio\blocks.py", line 1352, in process_api result = await self.call_function( File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\gradio\blocks.py", line 1093, in call_function prediction = await utils.async_iteration(iterator) File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\gradio\utils.py", line 341, in async_iteration return await iterator.__anext__() File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\gradio\utils.py", line 334, in __anext__ return await anyio.to_thread.run_sync( File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\anyio\to_thread.py", line 33, in run_sync return await get_asynclib().run_sync_in_worker_thread( File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\anyio\_backends\_asyncio.py", line 877, in run_sync_in_worker_thread return await future File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\anyio\_backends\_asyncio.py", line 807, in run result = context.run(func, *args) File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\gradio\utils.py", line 317, in run_sync_iterator_async return next(iterator) File "C:\Applications\TTS\oobabooga_windows\tts-generation-webui\src\tortoise\generation_tab_tortoise.py", line 165, in gen yield from generate_tortoise_long( File "C:\Applications\TTS\oobabooga_windows\tts-generation-webui\src\tortoise\gen_tortoise.py", line 199, in generate_tortoise_long datas = generate_tortoise( File "C:\Applications\TTS\oobabooga_windows\tts-generation-webui\src\tortoise\gen_tortoise.py", line 110, in generate_tortoise tts = get_tts() 
File "C:\Applications\TTS\oobabooga_windows\tts-generation-webui\src\tortoise\gen_tortoise.py", line 84, in get_tts MODEL = TextToSpeech( File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\tortoise\api.py", line 247, in __init__ self.autoregressive.load_state_dict(torch.load(get_model_path('autoregressive.pth', models_dir)), strict=False) File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\torch\serialization.py", line 797, in load with _open_zipfile_reader(opened_file) as opened_zipfile: File "C:\Applications\TTS\oobabooga_windows\installer_files\env\lib\site-packages\torch\serialization.py", line 283, in __init__ super().__init__(torch._C.PyTorchFileReader(name_or_buffer)) RuntimeError: PytorchStreamReader failed reading zip archive: failed finding central directory