Traceback (most recent call last):
File "/content/ebook-GPT-translator/ebook-GPT-translator/pdf-epub-GPT-translator/text_translation.py", line 515, in <module>
translated_short_text = translate_and_store(short_text)
File "/content/ebook-GPT-translator/ebook-GPT-translator/pdf-epub-GPT-translator/text_translation.py", line 355, in translate_and_store
translated_text = translate_text(text)
File "/content/ebook-GPT-translator/ebook-GPT-translator/pdf-epub-GPT-translator/text_translation.py", line 335, in translate_text
completion = create_chat_completion(prompt, text)
File "/content/ebook-GPT-translator/ebook-GPT-translator/pdf-epub-GPT-translator/text_translation.py", line 154, in create_chat_completion
return openai.ChatCompletion.create(
File "/usr/local/lib/python3.10/dist-packages/openai/api_resources/chat_completion.py", line 25, in create
return super().create(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/openai/api_resources/abstract/engine_apiresource.py", line 153, in create
response, _, api_key = requestor.request(
File "/usr/local/lib/python3.10/dist-packages/openai/api_requestor.py", line 298, in request
resp, got_stream = self._interpret_response(result, stream)
File "/usr/local/lib/python3.10/dist-packages/openai/api_requestor.py", line 700, in _interpret_response
self._interpret_response_line(
File "/usr/local/lib/python3.10/dist-packages/openai/api_requestor.py", line 763, in _interpret_response_line
raise self.handle_error_response(
openai.error.InvalidRequestError: This model's maximum context length is 4097 tokens. However, your messages resulted in 4291 tokens. Please reduce the length of the messages.
Traceback (most recent call last):
File "/content/ebook-GPT-translator/ebook-GPT-translator/pdf-epub-GPT-translator/text_translation.py", line 515, in <module>
translated_short_text = translate_and_store(short_text)
File "/content/ebook-GPT-translator/ebook-GPT-translator/pdf-epub-GPT-translator/text_translation.py", line 355, in translate_and_store
translated_text = translate_text(text)
File "/content/ebook-GPT-translator/ebook-GPT-translator/pdf-epub-GPT-translator/text_translation.py", line 335, in translate_text
completion = create_chat_completion(prompt, text)
File "/content/ebook-GPT-translator/ebook-GPT-translator/pdf-epub-GPT-translator/text_translation.py", line 154, in create_chat_completion
return openai.ChatCompletion.create(
File "/usr/local/lib/python3.10/dist-packages/openai/api_resources/chat_completion.py", line 25, in create
return super().create(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/openai/api_resources/abstract/engine_apiresource.py", line 153, in create
response, _, api_key = requestor.request(
File "/usr/local/lib/python3.10/dist-packages/openai/api_requestor.py", line 298, in request
resp, got_stream = self._interpret_response(result, stream)
File "/usr/local/lib/python3.10/dist-packages/openai/api_requestor.py", line 700, in _interpret_response
self._interpret_response_line(
File "/usr/local/lib/python3.10/dist-packages/openai/api_requestor.py", line 763, in _interpret_response_line
raise self.handle_error_response(
openai.error.InvalidRequestError: This model's maximum context length is 4097 tokens. However, your messages resulted in 4291 tokens. Please reduce the length of the messages.
这个长度限制可以作为参数进行配置吗?(Can this length limit be made configurable as a parameter?)