openai / openai-python

The official Python library for the OpenAI API
https://pypi.org/project/openai/
Apache License 2.0
21.98k stars 3.03k forks source link

Something went wrong during completion 2. Reason: Message text is empty #1545

Closed vtayh closed 2 months ago

vtayh commented 2 months ago

Confirm this is an issue with the Python library and not an underlying OpenAI API

Describe the bug

Something went wrong during completion 2. Reason: Message text is empty

To Reproduce

chatgpt_telegram_bot | 2024-07-14 21:40:09,080 - DEBUG - httpcore.http11 - response_closed.complete chatgpt_telegram_bot | 2024-07-14 21:40:09,080 - DEBUG - openai_utils - Pre-processed answer: chatgpt_telegram_bot | 2024-07-14 21:40:09,080 - DEBUG - openai_utils - Post-processed answer: chatgpt_telegram_bot | 2024-07-14 21:40:09,081 - ERROR - openai_utils - Message text is empty chatgpt_telegram_bot | 2024-07-14 21:40:09,081 - ERROR - openai_utils - Exception: Message text is empty chatgpt_telegram_bot | 2024-07-14 21:40:09,082 - ERROR - main - Something went wrong during completion 2. Reason: Message text is empty chatgpt_telegram_bot | 2024-07-14 21:40:09,082 - DEBUG - telegram.ext.ExtBot - Passing request through rate limiter of type <class 'telegram.ext._aioratelimiter.AIORateLimiter'> with rate_limit_args None chatgpt_telegram_bot | 2024-07-14 21:40:09,083 - DEBUG - telegram.ext.ExtBot - Calling Bot API endpoint sendMessage with parameters {'chat_id': 5212252839, 'text': 'Something went wrong during completion 2. Reason: Message text is empty'}

Code snippets

class ChatGPT:
    def __init__(self, model="gpt-4-1106-preview"):
        assert model in {
            "text-davinci-003", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "gpt-4", "gpt-4-1106-preview", 
            "gpt-4-vision-preview", "gpt-4-turbo-2024-04-09", "gpt-4o"
        }, f"Unknown model: {model}"
        self.model = model
        self.logger = logging.getLogger(__name__)
        self.headers = {
            "Authorization": f"Bearer {config.openai_api_key}",
            "Content-Type": "application/json",
        }
        self.client = global_client

    async def send_message(self, message, dialog_messages=[], chat_mode="assistant"):
        if chat_mode not in config.chat_modes.keys():
            raise ValueError(f"Chat mode {chat_mode} is not supported")

        n_dialog_messages_before = len(dialog_messages)
        answer = None
        n_input_tokens, n_output_tokens = 0, 0
        n_first_dialog_messages_removed = 0
        while answer is None:
            try:
                if self.model in {"gpt-3.5-turbo-16k", "gpt-3.5-turbo", "gpt-4", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4-turbo-2024-04-09", "gpt-4o"}:
                    messages = self._generate_prompt_messages(message, dialog_messages, chat_mode)
                    self.logger.debug(f"Generated messages: {messages}")
                    validate_payload({
                        "model": self.model,
                        "messages": messages,
                        **OPENAI_COMPLETION_OPTIONS
                    })
                    r = await self.client.chat.completions.create(
                        model=self.model,
                        messages=messages,
                        **OPENAI_COMPLETION_OPTIONS
                    )
                    self.logger.debug(f"OpenAI API response: {r}")
                    if not r.choices or not r.choices[0].message or not r.choices[0].message.content:
                        self.logger.error("Received empty message content from OpenAI API.")
                        raise ValueError("Received empty message content from OpenAI API.")
                    answer = r.choices[0].message.content
                elif self.model == "text-davinci-003":
                    prompt = self._generate_prompt(message, dialog_messages, chat_mode)
                    self.logger.debug(f"Generated prompt: {prompt}")
                    validate_payload({
                        "model": self.model,
                        "prompt": prompt,
                        **OPENAI_COMPLETION_OPTIONS
                    })
                    r = await self.client.completions.create(
                        model=self.model,
                        prompt=prompt,
                        **OPENAI_COMPLETION_OPTIONS
                    )
                    self.logger.debug(f"OpenAI API response: {r}")
                    if not r.choices or not r.choices[0].text:
                        self.logger.error("Received empty message content from OpenAI API.")
                        raise ValueError("Received empty message content from OpenAI API.")
                    answer = r.choices[0].text
                else:
                    raise ValueError(f"Unknown model: {self.model}")

                answer = self._postprocess_answer(answer)
                if not answer.strip():
                    self.logger.error("Post-processed answer is empty")
                    raise ValueError("Post-processed answer is empty")
                n_input_tokens, n_output_tokens = r.usage.prompt_tokens, r.usage.completion_tokens
            except Exception as e:
                self.logger.error(f"Exception: {str(e)}")
                if len(dialog_messages) == 0:
                    raise ValueError("Dialog messages is reduced to zero, but still has too many tokens to make completion") from e

                dialog_messages = dialog_messages[1:]

        n_first_dialog_messages_removed = n_dialog_messages_before - len(dialog_messages)

        return answer, (n_input_tokens, n_output_tokens), n_first_dialog_messages_removed

    async def send_message_stream(self, message, dialog_messages=[], chat_mode="assistant"):
        if chat_mode not in config.chat_modes.keys():
            raise ValueError(f"Chat mode {chat_mode} is not supported")

        n_dialog_messages_before = len(dialog_messages)
        answer = None
        n_input_tokens, n_output_tokens, n_first_dialog_messages_removed = 0, 0, 0
        while answer is None:
            try:
                if self.model in {"gpt-3.5-turbo-16k", "gpt-3.5-turbo", "gpt-4", "gpt-4-1106-preview", "gpt-4-turbo-2024-04-09", "gpt-4o"}:
                    messages = self._generate_prompt_messages(message, dialog_messages, chat_mode)
                    self.logger.debug(f"Generated messages: {messages}")

                    r_gen = await self.client.chat.completions.create(
                        model=self.model,
                        messages=messages,
                        stream=True,
                        **OPENAI_COMPLETION_OPTIONS
                    )

                    answer = ""
                    async for r_item in r_gen:
                        delta = r_item.choices[0].delta

                        if "content" in delta:
                            answer += delta.content
                            n_input_tokens, n_output_tokens = self._count_tokens_from_messages(messages, answer, model=self.model)
                            n_first_dialog_messages_removed = 0

                            yield "not_finished", answer, (n_input_tokens, n_output_tokens), n_first_dialog_messages_removed

                elif self.model == "text-davinci-003":
                    prompt = self._generate_prompt(message, dialog_messages, chat_mode)
                    self.logger.debug(f"Generated prompt: {prompt}")
                    r_gen = self.client.completions.create(
                        model=self.model,
                        prompt=prompt,
                        stream=True,
                        **OPENAI_COMPLETION_OPTIONS
                    )

                    answer = ""
                    async for r_item in r_gen:
                        if not r_item.choices or not r_item.choices[0].text:
                            self.logger.error("Received empty message content from OpenAI API stream.")
                            raise ValueError("Received empty message content from OpenAI API stream.")
                        answer += r_item.choices[0].text
                        n_input_tokens, n_output_tokens = self._count_tokens_from_prompt(prompt, answer, model=self.model)
                        n_first_dialog_messages_removed = n_dialog_messages_before - len(dialog_messages)
                        yield "not_finished", answer, (n_input_tokens, n_output_tokens), n_first_dialog_messages_removed

                answer = self._postprocess_answer(answer)
                if not answer.strip():
                    self.logger.error("Message text is empty")
                    raise ValueError("Message text is empty")

            except Exception as e:
                self.logger.error(f"Exception: {str(e)}")
                if len(dialog_messages) == 0:
                    raise e

                dialog_messages = dialog_messages[1:]
                n_first_dialog_messages_removed = n_dialog_messages_before - len(dialog_messages)

        yield "finished", answer, (n_input_tokens, n_output_tokens), n_first_dialog_messages_removed

    async def send_vision_message(
        self,
        message,
        dialog_messages=[],
        chat_mode="assistant",
        image_buffer: BytesIO = None,
    ):
        n_dialog_messages_before = len(dialog_messages)
        answer = None
        n_input_tokens, n_output_tokens = 0, 0
        n_first_dialog_messages_removed = 0
        while answer is None:
            try:
                if self.model == "gpt-4-vision-preview":
                    messages = self._generate_prompt_messages(
                        message, dialog_messages, chat_mode, image_buffer
                    )
                    self.logger.debug(f"Generated messages: {messages}")
                    r = await self.client.chat.completions.create(
                        model=self.model,
                        messages=messages,
                        **OPENAI_COMPLETION_OPTIONS
                    )
                    self.logger.debug(f"OpenAI API response: {r}")
                    if not r.choices or not r.choices[0].message or not r.choices[0].message.content:
                        self.logger.error("Received empty message content from OpenAI API.")
                        raise ValueError("Received empty message content from OpenAI API.")
                    answer = r.choices[0].message.content
                else:
                    raise ValueError(f"Unsupported model: {self.model}")

                answer = self._postprocess_answer(answer)
                if not answer.strip():
                    self.logger.error("Message text is empty")
                    raise ValueError("Message text is empty")
                n_input_tokens, n_output_tokens = (
                    r.usage.prompt_tokens,
                    r.usage.completion_tokens,
                )
            except Exception as e:
                self.logger.error(f"Exception: {str(e)}")
                if len(dialog_messages) == 0:
                    raise ValueError(
                        "Dialog messages is reduced to zero, but still has too many tokens to make completion"
                    ) from e

                dialog_messages = dialog_messages[1:]

            n_first_dialog_messages_removed = n_dialog_messages_before - len(dialog_messages)

        return (
            answer,
            (n_input_tokens, n_output_tokens),
            n_first_dialog_messages_removed,
        )

    async def send_vision_message_stream(
        self,
        message,
        dialog_messages=[],
        chat_mode="assistant",
        image_buffer: BytesIO = None,
    ):
        n_dialog_messages_before = len(dialog_messages)
        answer = None
        n_input_tokens, n_output_tokens = 0, 0
        n_first_dialog_messages_removed = 0
        while answer is None:
            try:
                if self.model == "gpt-4-vision-preview":
                    messages = self._generate_prompt_messages(
                        message, dialog_messages, chat_mode, image_buffer
                    )
                    self.logger.debug(f"Generated messages: {messages}")

                    r_gen = await self.client.chat.completions.create(
                        model=self.model,
                        messages=messages,
                        stream=True,
                        **OPENAI_COMPLETION_OPTIONS,
                    )

                    answer = ""
                    async for r_item in r_gen:
                        delta = r_item.choices[0].delta
                        if "content" in delta:
                            answer += delta.content
                            (
                                n_input_tokens,
                                n_output_tokens,
                            ) = self._count_tokens_from_messages(
                                messages, answer, model=self.model
                            )
                            n_first_dialog_messages_removed = (
                                n_dialog_messages_before - len(dialog_messages)
                            )
                            yield "not_finished", answer, (
                                n_input_tokens,
                                n_output_tokens,
                            ), n_first_dialog_messages_removed

                answer = self._postprocess_answer(answer)
                if not answer.strip():
                    self.logger.error("Message text is empty")
                    raise ValueError("Message text is empty")

            except Exception as e:
                self.logger.error(f"Exception: {str(e)}")
                if len(dialog_messages) == 0:
                    raise e
                dialog_messages = dialog_messages[1:]

        yield "finished", answer, (
            n_input_tokens,
            n_output_tokens,
        ), n_first_dialog_messages_removed

    def _generate_prompt(self, message, dialog_messages, chat_mode):
        prompt = config.chat_modes[chat_mode]["prompt_start"]
        prompt += "\n\n"

        if len(dialog_messages) > 0:
            prompt += "Chat:\n"
            for dialog_message in dialog_messages:
                prompt += f"User: {dialog_message['user']}\n"
                prompt += f"Assistant: {dialog_message['bot']}\n"

        prompt += f"User: {message}\n"
        prompt += "Assistant: "

        return prompt

    def _encode_image(self, image_buffer: BytesIO) -> bytes:
        return base64.b64encode(image_buffer.read()).decode("utf-8")

    def _generate_prompt_messages(self, message, dialog_messages, chat_mode, image_buffer: BytesIO = None):
        prompt = config.chat_modes[chat_mode]["prompt_start"]

        messages = [{"role": "system", "content": prompt}]

        for dialog_message in dialog_messages:
            messages.append({"role": "user", "content": dialog_message["user"]})
            messages.append({"role": "assistant", "content": dialog_message["bot"]})

        if image_buffer is not None:
            messages.append(
                {
                    "role": "user", 
                    "content": [
                        {
                            "type": "text",
                            "text": message,
                        },
                        {
                            "type": "image",
                            "image": self._encode_image(image_buffer),
                        }
                    ]
                }

            )
        else:
            messages.append({"role": "user", "content": message})

        return messages

    def _postprocess_answer(self, answer):
        self.logger.debug(f"Pre-processed answer: {answer}")
        answer = answer.strip()
        self.logger.debug(f"Post-processed answer: {answer}")
        return answer

    def _count_tokens_from_messages(self, messages, answer, model="gpt-4-1106-preview"):
        encoding = tiktoken.encoding_for_model(model)

        tokens_per_message = 3
        tokens_per_name = 1

        if model.startswith("gpt-3"):
            tokens_per_message = 4
            tokens_per_name = -1
        elif model.startswith("gpt-4"):
            tokens_per_message = 3
            tokens_per_name = 1 
        else:
            raise ValueError(f"Unknown model: {model}")

        n_input_tokens = 0
        for message in messages:
            n_input_tokens += tokens_per_message
            if isinstance(message["content"], list):
                for sub_message in message["content"]:
                    if "type" in sub_message:
                        if sub_message["type"] == "text":
                            n_input_tokens += len(encoding.encode(sub_message["text"]))
                        elif sub_message["type"] == "image_url":
                            pass
            else:
                if "type" in message:
                    if message["type"] == "text":
                        n_input_tokens += len(encoding.encode(message["text"]))
                    elif message["type"] == "image_url":
                        pass

        n_input_tokens += 2

        n_output_tokens = 1 + len(encoding.encode(answer))

        return n_input_tokens, n_output_tokens

    def _count_tokens_from_prompt(self, prompt, answer, model="text-davinci-003"):
        encoding = tiktoken.encoding_for_model(model)

        n_input_tokens = len(encoding.encode(prompt)) + 1
        n_output_tokens = len(encoding.encode(answer))

        return n_input_tokens, n_output_tokens

async def transcribe_audio(audio_file) -> str:
    r = await global_client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file
    )
    return r.text or ""

async def generate_images(prompt, model="dall-e-2", n_images=4, size="1024x1024", quality="standard"):
    if model=="dalle-2":
        model="dall-e-2"
        quality="standard"

    if model=="dalle-3":
        model="dall-e-3"
        n_images=1

    response = await global_client.images.generate(
        model=model,
        prompt=prompt,
        n=n_images,
        size=size,
        quality=quality
    )

    image_urls = [item.url for item in response.data]
    return image_urls

async def is_content_acceptable(prompt):
    r = await global_client.moderations.create(input=prompt)
    return not all(r.results[0].categories.values())

OS

Debian 12.0

Python version

Python 3.8

Library version

openAI 1.35

RobertCraigie commented 2 months ago

This appears to be related to the API, not the SDK itself, please ask for help in the community forum: https://community.openai.com/