[2023-03-07 06:42:01,003] ERROR in app: Exception on /callback [POST]
Traceback (most recent call last):
File "/home/runner/ChatGPT-Line-Bot/venv/lib/python3.10/site-packages/flask/app.py", line 2528, in wsgi_app
response = self.full_dispatch_request()
File "/home/runner/ChatGPT-Line-Bot/venv/lib/python3.10/site-packages/flask/app.py", line 1825, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/runner/ChatGPT-Line-Bot/venv/lib/python3.10/site-packages/flask/app.py", line 1823, in full_dispatch_request
rv = self.dispatch_request()
File "/home/runner/ChatGPT-Line-Bot/venv/lib/python3.10/site-packages/flask/app.py", line 1799, in dispatch_request
return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args)
File "main.py", line 38, in callback
handler.handle(body, signature)
File "/home/runner/ChatGPT-Line-Bot/venv/lib/python3.10/site-packages/linebot/webhook.py", line 259, in handle
self.__invoke_func(func, event, payload)
File "/home/runner/ChatGPT-Line-Bot/venv/lib/python3.10/site-packages/linebot/webhook.py", line 271, in __invoke_func
func(event)
File "main.py", line 57, in handle_text_message
response = chatgpt.get_response(user_id, text)
File "/home/runner/ChatGPT-Line-Bot/src/chatgpt.py", line 12, in get_response
response = self.model.chat_completion(self.memory.get(user_id))
File "/home/runner/ChatGPT-Line-Bot/src/models.py", line 20, in chat_completion
response = openai.ChatCompletion.create(
File "/home/runner/ChatGPT-Line-Bot/venv/lib/python3.10/site-packages/openai/api_resources/chat_completion.py", line 25, in create
return super().create(*args, **kwargs)
File "/home/runner/ChatGPT-Line-Bot/venv/lib/python3.10/site-packages/openai/api_resources/abstract/engine_api_resource.py", line 153, in create
response, _, api_key = requestor.request(
File "/home/runner/ChatGPT-Line-Bot/venv/lib/python3.10/site-packages/openai/api_requestor.py", line 226, in request
resp, got_stream = self._interpret_response(result, stream)
File "/home/runner/ChatGPT-Line-Bot/venv/lib/python3.10/site-packages/openai/api_requestor.py", line 619, in _interpret_response
self._interpret_response_line(
File "/home/runner/ChatGPT-Line-Bot/venv/lib/python3.10/site-packages/openai/api_requestor.py", line 679, in _interpret_response_line
raise self.handle_error_response(
openai.error.InvalidRequestError: This model's maximum context length is 4096 tokens. However, your messages resulted in 4117 tokens. Please reduce the length of the messages.
Today I found that the error raised when the messages get too long crashes the whole system.
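The crash happens because openai.error.InvalidRequestError propagates out of get_response in src/chatgpt.py and all the way up through the LINE webhook handler, so the /callback request fails. Below is a minimal sketch of one way to keep the bot responsive, assuming src/chatgpt.py keeps per-user history in self.memory; only memory.get() appears in the traceback, so memory.append() and memory.remove() are assumed helper names and may need adjusting to the real interface.

```python
from openai.error import InvalidRequestError


def get_response(self, user_id, text):
    # Assumed helper: store the incoming user message in per-user history.
    self.memory.append(user_id, {'role': 'user', 'content': text})
    try:
        response = self.model.chat_completion(self.memory.get(user_id))
    except InvalidRequestError:
        # The context exceeded the model's 4096-token limit: drop the stored
        # history and retry with only the latest message instead of crashing.
        self.memory.remove(user_id)  # assumed helper
        self.memory.append(user_id, {'role': 'user', 'content': text})
        response = self.model.chat_completion(self.memory.get(user_id))
    reply = response['choices'][0]['message']['content'].strip()
    self.memory.append(user_id, {'role': 'assistant', 'content': reply})
    return reply
```

A more polished fix would trim only the oldest messages, or count tokens with tiktoken before calling the API, instead of discarding the whole conversation.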