ILikeAI / AlwaysReddy

AlwaysReddy is an LLM voice assistant that is always just a hotkey away.
MIT License
656 stars 67 forks

httpx.LocalProtocolError: Illegal header value b'Bearer ' #37

Closed: BracerJack closed this issue 5 months ago

BracerJack commented 5 months ago

I have no idea what is going on, so I am just going to put it here ;p I have executed pip install -r requirements.txt and pip install -r faster_whisper_requirements.txt (because I want everything to run locally).

Playing sound FX: sounds/recording-start
use_clipboard: False
Stopping recording...
Playing sound FX: sounds/recording-end
Recording saved to audio_files\temp_recording.wav
An error occurred during the transcription process: Connection error.
Traceback (most recent call last):
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 69, in map_httpcore_exceptions
    yield
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 233, in handle_request
    resp = self._pool.handle_request(req)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\connection_pool.py", line 216, in handle_request
    raise exc from None
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\connection_pool.py", line 196, in handle_request
    response = connection.handle_request(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\connection.py", line 101, in handle_request
    return self._connection.handle_request(request)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\http11.py", line 143, in handle_request
    raise exc
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\http11.py", line 93, in handle_request
    self._send_request_headers(**kwargs)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\http11.py", line 151, in _send_request_headers
    with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):
  File "C:\Users\Username\AppData\Local\Programs\Python\Python310\lib\contextlib.py", line 153, in __exit__
    self.gen.throw(typ, value, traceback)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_exceptions.py", line 14, in map_exceptions
    raise to_exc(exc) from exc
httpcore.LocalProtocolError: Illegal header value b'Bearer '

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 952, in _request
    response = self._client.send(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 914, in send
    response = self._send_handling_auth(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 942, in _send_handling_auth
    response = self._send_handling_redirects(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 979, in _send_handling_redirects
    response = self._send_single_request(request)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 1015, in _send_single_request
    response = transport.handle_request(request)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 232, in handle_request
    with map_httpcore_exceptions():
  File "C:\Users\Username\AppData\Local\Programs\Python\Python310\lib\contextlib.py", line 153, in __exit__
    self.gen.throw(typ, value, traceback)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 86, in map_httpcore_exceptions
    raise mapped_exc(message) from exc
httpx.LocalProtocolError: Illegal header value b'Bearer '

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "C:\AlwaysReddy\transcriber.py", line 42, in transcribe_audio
    transcript = self.client.transcribe_audio_file(full_path)
  File "C:\AlwaysReddy\transcription_apis\openai_api.py", line 24, in transcribe_audio_file
    transcript = self.client.audio.transcriptions.create(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\resources\audio\transcriptions.py", line 116, in create
    return self._post(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 1240, in post
    return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 921, in request
    return self._request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 976, in _request
    return self._retry_request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 1053, in _retry_request
    return self._request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 976, in _request
    return self._retry_request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 1053, in _retry_request
    return self._request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 986, in _request
    raise APIConnectionError(request=request) from err
openai.APIConnectionError: Connection error.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "C:\AlwaysReddy\main.py", line 87, in stop_recording
    transcript = self.transcription_manager.transcribe_audio(self.recorder.filename)
  File "C:\AlwaysReddy\transcriber.py", line 57, in transcribe_audio
    raise Exception(f"An error occurred during the transcription process: {e}") from e
Exception: An error occurred during the transcription process: Connection error.
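For reference, the Illegal header value b'Bearer ' message comes from h11, the HTTP/1.1 library underneath httpx and httpcore: it rejects header values with leading or trailing whitespace, and an Authorization header built from an empty API key is exactly "Bearer " with a trailing space and no token. A minimal repro sketch using h11 directly (not AlwaysReddy's own code):

import h11

# Constructing the request event validates headers; "Bearer " ends in a
# space because the token is empty, and h11 rejects such values outright.
try:
    h11.Request(
        method="POST",
        target="/v1/audio/transcriptions",
        headers=[("Host", "api.openai.com"), ("Authorization", "Bearer ")],
    )
except h11.LocalProtocolError as e:
    print(e)  # Illegal header value b'Bearer '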

ILikeAI commented 5 months ago

Hmmm, it looks like there was an internet connection issue during transcription, but transcription should be happening locally.

Make sure your config file is set up correctly; the transcription section should look something like this:


### Transcription API Settings ###

## Faster Whisper local transcription ###
TRANSCRIPTION_API = "FasterWhisper" # this will use the local whisper model
WHISPER_MODEL = "tiny.en" # If you prefer not to use english set it to "tiny", if the transcription quality is too low then set it to "base" but this will be a little slower
BEAM_SIZE = 5

## OPENAI Hosted Transcription ###
# TRANSCRIPTION_API = "openai" # this will use the hosted openai api
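A quick way to confirm which transcription backend is actually selected is to import the config and print the value. A minimal sketch, assuming you run it from the AlwaysReddy directory where config.py lives:

import config

# The last assignment in config.py wins, so this shows the backend
# that will really be used at runtime.
print(config.TRANSCRIPTION_API)  # expected: FasterWhisper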

Let me know what you find!

ILikeAI commented 5 months ago

Also double check that you have set your API key in the .env file if you want to use the OpenAI API.
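For context, the usual pattern (a sketch, assuming python-dotenv is what loads .env here) is that the key is read from .env and passed straight into the client, so a blank key flows unchanged into the Authorization header:

import os
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()  # reads .env into the process environment
api_key = os.getenv("OPENAI_API_KEY")  # "" if the key is left blank in .env

# An empty string is accepted by the client but produces the header
# "Authorization: Bearer " (trailing space, no token) on every request,
# which is the illegal header value in the tracebacks above.
client = OpenAI(api_key=api_key)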

BracerJack commented 5 months ago

Thanks for your feedback, I will try again. The API key is "" because I am using Ollama.

TOGETHER_API_KEY=""
OPENAI_API_KEY=""
ANTHROPIC_API_KEY="sk-.."
PERPLEXITY_API_KEY="pplx-.."
OPENROUTER_API_KEY="sk-or..."

BracerJack commented 5 months ago

I have retested on 16 May 2024:

Press 'alt+ctrl+r' to start recording, press again to stop and transcribe.
Double tap the record hotkey to give AlwaysReddy the content currently copied in your clipboard.
Press 'alt+ctrl+e' to cancel recording.
Press 'alt+ctrl+w' to clear the chat history.
use_clipboard: False
Starting recording...
Recording started...
Playing sound FX: sounds/recording-start
use_clipboard: False
Stopping recording...
Playing sound FX: sounds/recording-end
Recording saved to audio_files\temp_recording.wav
An error occurred during the transcription process: Connection error.
Traceback (most recent call last):
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 69, in map_httpcore_exceptions
    yield
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 233, in handle_request
    resp = self._pool.handle_request(req)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\connection_pool.py", line 216, in handle_request
    raise exc from None
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\connection_pool.py", line 196, in handle_request
    response = connection.handle_request(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\connection.py", line 101, in handle_request
    return self._connection.handle_request(request)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\http11.py", line 143, in handle_request
    raise exc
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\http11.py", line 93, in handle_request
    self._send_request_headers(**kwargs)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\http11.py", line 151, in _send_request_headers
    with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):
  File "C:\Users\username\AppData\Local\Programs\Python\Python310\lib\contextlib.py", line 153, in __exit__
    self.gen.throw(typ, value, traceback)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_exceptions.py", line 14, in map_exceptions
    raise to_exc(exc) from exc
httpcore.LocalProtocolError: Illegal header value b'Bearer '

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 952, in _request
    response = self._client.send(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 914, in send
    response = self._send_handling_auth(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 942, in _send_handling_auth
    response = self._send_handling_redirects(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 979, in _send_handling_redirects
    response = self._send_single_request(request)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 1015, in _send_single_request
    response = transport.handle_request(request)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 232, in handle_request
    with map_httpcore_exceptions():
  File "C:\Users\username\AppData\Local\Programs\Python\Python310\lib\contextlib.py", line 153, in __exit__
    self.gen.throw(typ, value, traceback)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 86, in map_httpcore_exceptions
    raise mapped_exc(message) from exc
httpx.LocalProtocolError: Illegal header value b'Bearer '

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "C:\AlwaysReddy\transcriber.py", line 42, in transcribe_audio
    transcript = self.client.transcribe_audio_file(full_path)
  File "C:\AlwaysReddy\transcription_apis\openai_api.py", line 24, in transcribe_audio_file
    transcript = self.client.audio.transcriptions.create(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\resources\audio\transcriptions.py", line 116, in create
    return self._post(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 1240, in post
    return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 921, in request
    return self._request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 976, in _request
    return self._retry_request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 1053, in _retry_request
    return self._request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 976, in _request
    return self._retry_request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 1053, in _retry_request
    return self._request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 986, in _request
    raise APIConnectionError(request=request) from err
openai.APIConnectionError: Connection error.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "C:\AlwaysReddy\main.py", line 87, in stop_recording
    transcript = self.transcription_manager.transcribe_audio(self.recorder.filename)
  File "C:\AlwaysReddy\transcriber.py", line 57, in transcribe_audio
    raise Exception(f"An error occurred during the transcription process: {e}") from e
Exception: An error occurred during the transcription process: Connection error.

BracerJack commented 5 months ago

This is the entire config:

## MAKE A COPY OF THIS CALLED config.py

VERBOSE = True
USE_GPU = False # Set to True to use GPU acceleration. Refer to the README for instructions on installing PyTorch with CUDA support.

### COMPLETIONS API SETTINGS ###
# Just uncomment the ONE api you want to use

### LOCAL OPTIONS ###

## OLLAMA COMPLETIONS API EXAMPLE ##
COMPLETIONS_API = "ollama"
COMPLETION_MODEL = "llava-llama3" #"llama3"
OLLAMA_API_BASE_URL = "http://localhost:11434" #This should be the default

## LM Studio COMPLETIONS API EXAMPLE ##
# COMPLETIONS_API = "lm_studio"
# COMPLETION_MODEL = "local-model" # You dont need to update this
# LM_STUDIO_API_BASE_URL = "http://localhost:1234/v1" #This should be the defualt

### Hosted APIS ###

## ANTHROPIC COMPLETIONS API EXAMPLE ##
# COMPLETIONS_API = "anthropic"
# COMPLETION_MODEL = "claude-3-sonnet-20240229"

## TOGETHER COMPLETIONS API EXAMPLE ##
# COMPLETIONS_API = "together"
# COMPLETION_MODEL = "meta-llama/Llama-3-8b-chat-hf"

## OPENAI COMPLETIONS API EXAMPLE ##
COMPLETIONS_API = "openai"
COMPLETION_MODEL = "gpt-4-0125-preview"

## PERPLEXITY COMPLETIONS API EXAMPLE ##
# COMPLETIONS_API = "perplexity"
# COMPLETION_MODEL = "llama-3-sonar-small-32k-online"

## OPENROUTER COMPLETIONS API EXAMPLE ##
# COMPLETIONS_API = "openrouter"
# COMPLETION_MODEL = "openai/gpt-3.5-turbo"

### Transcription API Settings ###

## Faster Whisper local transcription ###
TRANSCRIPTION_API = "FasterWhisper" # this will use the local whisper model
WHISPER_MODEL = "tiny.en" # If you prefer not to use english set it to "tiny", if the transcription quality is too low then set it to "base" but this will be a little slower
BEAM_SIZE = 5

## OPENAI Hosted Transcription ###
TRANSCRIPTION_API = "openai" # this will use the hosted openai api

### TTS SETTINGS ###
TTS_ENGINE="openai" # 'piper' or 'openai' piper is local and fast but openai is better sounding

PIPER_VOICE = "default_female_voice"
OPENAI_VOICE = "nova"

### PROMPTS ###
ACTIVE_PROMPT = "default_prompt" #Right now there is only 1 prompt

### HOTKEYS ###
CANCEL_HOTKEY = 'alt+ctrl+e'
CLEAR_HISTORY_HOTKEY = 'alt+ctrl+w'
RECORD_HOTKEY = 'alt+ctrl+r'

### MISC ###
HOTKEY_DELAY = 0.5
AUDIO_FILE_DIR = "audio_files"
MAX_TOKENS = 8000 #Max tokens allowed in memory at once
START_SEQ = "-CLIPSTART-" #the model is instructed to place any text for the clipboard between the start and end seq
END_SEQ = "-CLIPEND-" #the model is instructed to place any text for the clipboard between the start and end seq

### AUDIO SETTINGS ###
BASE_VOLUME = 1
FS = 11025
START_SOUND_VOLUME = 0.05
END_SOUND_VOLUME = 0.05
CANCEL_SOUND_VOLUME = 0.09
MIN_RECORDING_DURATION = 0.3
MAX_RECORDING_DURATION= 600 # If you record for more than 10 minutes, the recording will stop automatically

BracerJack commented 5 months ago

This is the env:

TOGETHER_API_KEY=""
OPENAI_API_KEY=""
ANTHROPIC_API_KEY="sk-.."
PERPLEXITY_API_KEY="pplx-.."
OPENROUTER_API_KEY="sk-or..."

ILikeAI commented 5 months ago

I see the issue. In your config you have uncommented the Ollama and FasterWhisper sections correctly, but you need to comment out the OpenAI sections: the OpenAI completions and transcription APIs are still declared after Ollama and Faster Whisper, so those later assignments are the ones that take effect. Update your config.py to this (the entire config):

## MAKE A COPY OF THIS CALLED config.py
VERBOSE = True
USE_GPU = False  # Set to True to use GPU acceleration. Refer to the README for instructions on installing PyTorch with CUDA support.

### COMPLETIONS API SETTINGS  ###
# Just uncomment the ONE api you want to use

### LOCAL OPTIONS ###

## OLLAMA COMPLETIONS API EXAMPLE ##
COMPLETIONS_API = "ollama"
COMPLETION_MODEL = "llava-llama3" #"llama3"
OLLAMA_API_BASE_URL = "http://localhost:11434" #This should be the default

## LM Studio COMPLETIONS API EXAMPLE ##
# COMPLETIONS_API = "lm_studio" 
# COMPLETION_MODEL = "local-model" # You dont need to update this
# LM_STUDIO_API_BASE_URL = "http://localhost:1234/v1" #This should be the defualt

### Hosted APIS ###

## ANTHROPIC COMPLETIONS API EXAMPLE ##
# COMPLETIONS_API = "anthropic" 
# COMPLETION_MODEL = "claude-3-sonnet-20240229" 

## TOGETHER COMPLETIONS API EXAMPLE ##
# COMPLETIONS_API = "together"
# COMPLETION_MODEL = "meta-llama/Llama-3-8b-chat-hf" 

## OPENAI COMPLETIONS API EXAMPLE ##
# COMPLETIONS_API = "openai"
# COMPLETION_MODEL = "gpt-4-0125-preview"

## PERPLEXITY COMPLETIONS API EXAMPLE ##
# COMPLETIONS_API = "perplexity"
# COMPLETION_MODEL = "llama-3-sonar-small-32k-online"

## OPENROUTER COMPLETIONS API EXAMPLE ##
# COMPLETIONS_API = "openrouter"
# COMPLETION_MODEL = "openai/gpt-3.5-turbo"

### Transcription API Settings ###

## Faster Whisper local transcription ###
TRANSCRIPTION_API = "FasterWhisper" # this will use the local whisper model
WHISPER_MODEL = "tiny.en" # If you prefer not to use english set it to "tiny", if the transcription quality is too low then set it to "base" but this will be a little slower
BEAM_SIZE = 5

## OPENAI Hosted Transcription ###
# TRANSCRIPTION_API = "openai" # this will use the hosted openai api

### TTS SETTINGS ###
TTS_ENGINE="openai" # 'piper' or 'openai' piper is local and fast but openai is better sounding

PIPER_VOICE = "default_female_voice"
OPENAI_VOICE = "nova"

### PROMPTS ###
ACTIVE_PROMPT = "default_prompt" #Right now there is only 1 prompt

### HOTKEYS ###
CANCEL_HOTKEY = 'alt+ctrl+e'
CLEAR_HISTORY_HOTKEY = 'alt+ctrl+w'
RECORD_HOTKEY = 'alt+ctrl+r'

### MISC ###
HOTKEY_DELAY = 0.5
AUDIO_FILE_DIR = "audio_files"
MAX_TOKENS = 8000 #Max tokens allowed in memory at once
START_SEQ = "-CLIPSTART-" #the model is instructed to place any text for the clipboard between the start and end seq
END_SEQ = "-CLIPEND-" #the model is instructed to place any text for the clipboard between the start and end seq

### AUDIO SETTINGS ###
BASE_VOLUME = 1 
FS = 11025   
START_SOUND_VOLUME = 0.05
END_SOUND_VOLUME = 0.05
CANCEL_SOUND_VOLUME = 0.09
MIN_RECORDING_DURATION = 0.3
MAX_RECORDING_DURATION= 600 # If you record for more than 10 minutes, the recording will stop automatically

I added a # before TRANSCRIPTION_API = "openai" # this will use the hosted openai api to comment it out, and did the same for the OpenAI completions section:

## OPENAI COMPLETIONS API EXAMPLE ##
COMPLETIONS_API = "openai"
COMPLETION_MODEL = "gpt-4-0125-preview"

->

## OPENAI COMPLETIONS API EXAMPLE ##
# COMPLETIONS_API = "openai"
# COMPLETION_MODEL = "gpt-4-0125-preview"
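The underlying mechanics: config.py is executed as ordinary Python, so when the same variable is assigned twice, the later assignment silently wins. A minimal illustration:

# config.py is plain Python; the second assignment overrides the first.
TRANSCRIPTION_API = "FasterWhisper"  # the local backend you intended
TRANSCRIPTION_API = "openai"         # declared further down the file

print(TRANSCRIPTION_API)  # prints: openai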

This should resolve the issue, let me know if it doesn't and reopen the issue :)

Sorry, the whole config system is kind of confusing; I'd like to find something better.

BracerJack commented 5 months ago

Hi, after using the config file you gave me directly, it recognized my voice and responded with text, but it didn't talk. Here is the output:

Using faster-whisper model: tiny.en and device: cpu

Press 'alt+ctrl+r' to start recording, press again to stop and transcribe.
Double tap the record hotkey to give AlwaysReddy the content currently copied in your clipboard.
Press 'alt+ctrl+e' to cancel recording.
Press 'alt+ctrl+w' to clear the chat history.
use_clipboard: False
Starting recording...
Recording started...
Playing sound FX: sounds/recording-start
use_clipboard: False
Stopping recording...
Playing sound FX: sounds/recording-end
Recording saved to audio_files\temp_recording.wav
Transcribing audio file: audio_files\temp_recording.wav
Detected language: en with probability 1.00

Transcription: Hello computer.
Traceback (most recent call last):
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 69, in map_httpcore_exceptions
    yield
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 233, in handle_request
    resp = self._pool.handle_request(req)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\connection_pool.py", line 216, in handle_request
    raise exc from None
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\connection_pool.py", line 196, in handle_request
    response = connection.handle_request(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\connection.py", line 101, in handle_request
    return self._connection.handle_request(request)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\http11.py", line 143, in handle_request
    raise exc
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\http11.py", line 93, in handle_request
    self._send_request_headers(**kwargs)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\http11.py", line 151, in _send_request_headers
    with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):
  File "C:\Users\username\AppData\Local\Programs\Python\Python310\lib\contextlib.py", line 153, in __exit__
    self.gen.throw(typ, value, traceback)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_exceptions.py", line 14, in map_exceptions
    raise to_exc(exc) from exc
httpcore.LocalProtocolError: Illegal header value b'Bearer '

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 952, in _request
    response = self._client.send(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 914, in send
    response = self._send_handling_auth(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 942, in _send_handling_auth
    response = self._send_handling_redirects(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 979, in _send_handling_redirects
    response = self._send_single_request(request)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 1015, in _send_single_request
    response = transport.handle_request(request)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 232, in handle_request
    with map_httpcore_exceptions():
  File "C:\Users\username\AppData\Local\Programs\Python\Python310\lib\contextlib.py", line 153, in __exit__
    self.gen.throw(typ, value, traceback)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 86, in map_httpcore_exceptions
    raise mapped_exc(message) from exc
httpx.LocalProtocolError: Illegal header value b'Bearer '

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "C:\AlwaysReddy\TTS_apis\openai_api.py", line 33, in tts
    spoken_response = self.client.audio.speech.create(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\resources\audio\speech.py", line 86, in create
    return self._post(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 1240, in post
    return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 921, in request
    return self._request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 976, in _request
    return self._retry_request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 1053, in _retry_request
    return self._request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 976, in _request
    return self._retry_request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 1053, in _retry_request
    return self._request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 986, in _request
    raise APIConnectionError(request=request) from err
openai.APIConnectionError: Connection error.

Traceback (most recent call last):
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 69, in map_httpcore_exceptions
    yield
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 233, in handle_request
    resp = self._pool.handle_request(req)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\connection_pool.py", line 216, in handle_request
    raise exc from None
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\connection_pool.py", line 196, in handle_request
    response = connection.handle_request(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\connection.py", line 101, in handle_request
    return self._connection.handle_request(request)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\http11.py", line 143, in handle_request
    raise exc
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\http11.py", line 93, in handle_request
    self._send_request_headers(**kwargs)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_sync\http11.py", line 151, in _send_request_headers
    with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):
  File "C:\Users\username\AppData\Local\Programs\Python\Python310\lib\contextlib.py", line 153, in __exit__
    self.gen.throw(typ, value, traceback)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpcore\_exceptions.py", line 14, in map_exceptions
    raise to_exc(exc) from exc
httpcore.LocalProtocolError: Illegal header value b'Bearer '

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 952, in _request
    response = self._client.send(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 914, in send
    response = self._send_handling_auth(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 942, in _send_handling_auth
    response = self._send_handling_redirects(
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 979, in _send_handling_redirects
    response = self._send_single_request(request)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_client.py", line 1015, in _send_single_request
    response = transport.handle_request(request)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 232, in handle_request
    with map_httpcore_exceptions():
  File "C:\Users\username\AppData\Local\Programs\Python\Python310\lib\contextlib.py", line 153, in __exit__
    self.gen.throw(typ, value, traceback)
  File "C:\AlwaysReddy\venv\lib\site-packages\httpx\_transports\default.py", line 86, in map_httpcore_exceptions
    raise mapped_exc(message) from exc
httpx.LocalProtocolError: Illegal header value b'Bearer '

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "C:\AlwaysReddy\TTS_apis\openai_api.py", line 33, in tts
    spoken_response = self.client.audio.speech.create(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\resources\audio\speech.py", line 86, in create
    return self._post(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 1240, in post
    return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 921, in request
    return self._request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 976, in _request
    return self._retry_request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 1053, in _retry_request
    return self._request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 976, in _request
    return self._retry_request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 1053, in _retry_request
    return self._request(
  File "C:\AlwaysReddy\venv\lib\site-packages\openai\_base_client.py", line 986, in _request
    raise APIConnectionError(request=request) from err
openai.APIConnectionError: Connection error.
Completion successful. Full response: Hello! How can I assist you today?

Response: Hello! How can I assist you today?
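The remaining tracebacks come from the TTS step (TTS_apis\openai_api.py): TTS_ENGINE in the config above is still "openai", so speech synthesis still calls the hosted API with the empty key, hitting the same Illegal header value b'Bearer '. Going by the config's own comment that piper is the local engine, a fully local setup would presumably also change:

### TTS SETTINGS ###
TTS_ENGINE="piper" # 'piper' or 'openai' piper is local and fast but openai is better sounding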