AllTalk is based on the Coqui TTS engine, similar to the Coqui_tts extension for Text generation webUI, however supports a variety of advanced features, such as a settings page, low VRAM support, DeepSpeed, narrator, model finetuning, custom models, wav file maintenance. It can also be used with 3rd Party software via JSON calls.
GNU Affero General Public License v3.0
1.16k
stars
122
forks
source link
xttsv2 (AllTalkV2) Streaming not working on ST (ASGI Exception) #379
Did a fresh install of AllTalk V2 and streaming is not working in Silly Tavern. Standard generation is working fine.
This is the error I get on the terminal:
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\responses.py", line 257, in call
await wrap(partial(self.listen_for_disconnect, receive))
File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\responses.py", line 253, in wrap
await func()
File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\responses.py", line 230, in listen_for_disconnect
message = await receive()
^^^^^^^^^^^^^^^
File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\uvicorn\protocols\http\h11_impl.py", line 534, in receive
await self.message_event.wait()
File "D:\SD\alltalk\alltalk_environment\env\Lib\asyncio\locks.py", line 213, in wait
await fut
asyncio.exceptions.CancelledError: Cancelled by cancel scope 2133557f990
During handling of the above exception, another exception occurred:
Exception Group Traceback (most recent call last):
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\uvicorn\protocols\http\h11_impl.py", line 406, in run_asgi
| result = await app( # type: ignore[func-returns-value]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\uvicorn\middleware\proxy_headers.py", line 60, in call
| return await self.app(scope, receive, send)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\fastapi\applications.py", line 1054, in call
| await super().call(scope, receive, send)
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\applications.py", line 113, in call
| await self.middleware_stack(scope, receive, send)
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\middleware\errors.py", line 187, in call
| raise exc
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\middleware\errors.py", line 165, in call
| await self.app(scope, receive, _send)
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\middleware\cors.py", line 85, in call
| await self.app(scope, receive, send)
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\middleware\exceptions.py", line 62, in call
| await wrap_app_handling_exceptions(self.app, conn)(scope, receive, send)
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette_exception_handler.py", line 62, in wrapped_app
| raise exc
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette_exception_handler.py", line 51, in wrapped_app
| await app(scope, receive, sender)
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\routing.py", line 715, in call
| await self.middleware_stack(scope, receive, send)
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\routing.py", line 735, in app
| await route.handle(scope, receive, send)
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\routing.py", line 288, in handle
| await self.app(scope, receive, send)
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\routing.py", line 76, in app
| await wrap_app_handling_exceptions(app, request)(scope, receive, send)
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette_exception_handler.py", line 62, in wrapped_app
| raise exc
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette_exception_handler.py", line 51, in wrapped_app
| await app(scope, receive, sender)
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\routing.py", line 74, in app
| await response(scope, receive, send)
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\responses.py", line 250, in call
| async with anyio.create_task_group() as task_group:
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\anyio_backends_asyncio.py", line 685, in aexit
| raise BaseExceptionGroup(
| ExceptionGroup: unhandled errors in a TaskGroup (1 sub-exception)
+-+---------------- 1 ----------------
| Traceback (most recent call last):
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\responses.py", line 253, in wrap
| await func()
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\responses.py", line 242, in stream_response
| async for chunk in self.body_iterator:
| File "D:\SD\alltalk\tts_server.py", line 708, in stream_response
| async for chunk in response:
| File "D:\SD\alltalk\system\tts_engines\xtts\model_engine.py", line 556, in generate_tts
| for i, chunk in enumerate(output):
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\torch\utils_contextlib.py", line 35, in generator_context
| response = gen.send(None)
| ^^^^^^^^^^^^^^
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\TTS\tts\models\xtts.py", line 658, in inference_stream
| gpt_generator = self.gpt.get_generator(
| ^^^^^^^^^^^^^^^^^^^^^^^
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\TTS\tts\layers\xtts\gpt.py", line 602, in get_generator
| return self.gpt_inference.generate_stream(
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
| return func(*args, **kwargs)
| ^^^^^^^^^^^^^^^^^^^^^
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\TTS\tts\layers\xtts\stream_generator.py", line 168, in generate
| self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=device)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\torch\nn\modules\module.py", line 1688, in getattr
| raise AttributeError(f"'{type(self).name}' object has no attribute '{name}'")
| AttributeError: 'GPT2InferenceModel' object has no attribute '_prepare_special_tokens'
+------------------------------------
diagnostics.log
Did a fresh install of AllTalk V2 and streaming is not working in Silly Tavern. Standard generation is working fine.
This is the error I get on the terminal:
ERROR: Exception in ASGI application Traceback (most recent call last): File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\responses.py", line 257, in call await wrap(partial(self.listen_for_disconnect, receive)) File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\responses.py", line 253, in wrap await func() File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\starlette\responses.py", line 230, in listen_for_disconnect message = await receive() ^^^^^^^^^^^^^^^ File "D:\SD\alltalk\alltalk_environment\env\Lib\site-packages\uvicorn\protocols\http\h11_impl.py", line 534, in receive await self.message_event.wait() File "D:\SD\alltalk\alltalk_environment\env\Lib\asyncio\locks.py", line 213, in wait await fut asyncio.exceptions.CancelledError: Cancelled by cancel scope 2133557f990
During handling of the above exception, another exception occurred: