langchain-ai / langchain-academy

https://academy.langchain.com

Module 5 - Lesson 5: BadRequestError: Error code: 400 #59

Open anuroop18 opened 4 hours ago

anuroop18 commented 4 hours ago

Found this while working on Module 5 - Lesson 5.

Whenever I try to give multiple types of instructions in a single message, I get the following error:

---------------------------------------------------------------------------
BadRequestError                           Traceback (most recent call last)
----> 1 for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
      2     chunk["messages"][-1].pretty_print()

BadRequestError: Error code: 400 - {'error': {'message': "An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. The following tool_call_ids did not have response messages: call_scCoFzFQAedYvhaossU6hsjP, call_zoi06RPqwbp6PpoLbvy...
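For context, the chat completions API requires that an assistant message containing `tool_calls` be immediately followed by one `ToolMessage` answering each `tool_call_id` before the model is called again. A minimal sketch of a well-formed history (the `get_weather` tool and the `call_1`/`call_2` ids are purely illustrative):

```python
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage

# An assistant turn that issued two parallel tool calls.
ai_turn = AIMessage(
    content="",
    tool_calls=[
        {"name": "get_weather", "args": {"city": "Delhi"}, "id": "call_1"},
        {"name": "get_weather", "args": {"city": "Mumbai"}, "id": "call_2"},
    ],
)

# Every tool_call_id must be answered before the next model call;
# a history missing either ToolMessage triggers the 400 above.
history = [
    HumanMessage(content="What's the weather in Delhi and Mumbai?"),
    ai_turn,
    ToolMessage(content="31C, sunny", tool_call_id="call_1"),
    ToolMessage(content="29C, humid", tool_call_id="call_2"),
]
```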

Example instruction:

  1. "My name is Lance. I like in India. I have to do workout in 5 hours. I have to sleep before 5 PM. Whenever you add todos combine the ones that are similar."
  2. "Clear all my profile info, all my todos, all my instructions."

I get this error whenever I give any complex instruction like the ones above.

Once the error occurs, there is no option other than restarting the notebook. How can I manage this?
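For reference, one possible way to recover the thread without restarting the kernel (a sketch, assuming the graph was compiled with a checkpointer and its `messages` key uses the standard `add_messages` reducer; untested against this notebook): answer each dangling `tool_call_id` with a stub `ToolMessage` via `update_state`, then resume streaming.

```python
from langchain_core.messages import AIMessage, ToolMessage

# Inspect the checkpointed state of the thread that errored.
state = graph.get_state(config)
last = state.values["messages"][-1]

# If the last message is an assistant turn with unanswered tool calls,
# append a stub ToolMessage for each pending tool_call_id so the
# history is valid again on the next model call.
if isinstance(last, AIMessage) and last.tool_calls:
    stubs = [
        ToolMessage(content="Tool call was not executed.", tool_call_id=tc["id"])
        for tc in last.tool_calls
    ]
    graph.update_state(config, {"messages": stubs})
```

One caveat: the traceback below shows the 400 raised inside Trustcall's patching step, so the dangling tool calls may live in the extractor's internal message list rather than in the top-level thread state; in that case this only cleans up the outer history.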

If any other information is needed, please let me know.

anuroop18 commented 3 hours ago

Here is the complete error message:

{
    "name": "BadRequestError",
    "message": "Error code: 400 - {'error': {'message': \"An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. The following tool_call_ids did not have response messages: call_scCoFzFQAedYvhaossU6hsjP, call_zoi06RPqwbp6PpoLbvycgrWu\", 'type': 'invalid_request_error', 'param': 'messages', 'code': None}}",
    "stack": "---------------------------------------------------------------------------
BadRequestError                           Traceback (most recent call last)
Cell In[52], line 5
      2 input_messages = [HumanMessage(content=\"I think building the project will take 1 month not 60 minutes.\")]
      4 # Run the graph
----> 5 for chunk in graph.stream({\"messages\": input_messages}, config, stream_mode=\"values\"):
      6     chunk[\"messages\"][-1].pretty_print()

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langgraph/pregel/__init__.py:1573, in Pregel.stream(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, subgraphs)
   1567     # Similarly to Bulk Synchronous Parallel / Pregel model
   1568     # computation proceeds in steps, while there are channel updates
   1569     # channel updates from step N are only visible in step N+1
   1570     # channels are guaranteed to be immutable for the duration of the step,
   1571     # with channel updates applied only at the transition between steps
   1572     while loop.tick(input_keys=self.input_channels):
-> 1573         for _ in runner.tick(
   1574             loop.tasks.values(),
   1575             timeout=self.step_timeout,
   1576             retry_policy=self.retry_policy,
   1577             get_waiter=get_waiter,
   1578         ):
   1579             # emit output
   1580             yield from output()
   1581 # emit output

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langgraph/pregel/runner.py:104, in PregelRunner.tick(self, tasks, reraise, timeout, retry_policy, get_waiter)
    102 t = tasks[0]
    103 try:
--> 104     run_with_retry(t, retry_policy, writer=writer)
    105     self.commit(t, None)
    106 except Exception as exc:

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langgraph/pregel/retry.py:40, in run_with_retry(task, retry_policy, writer)
     38 task.writes.clear()
     39 # run the task
---> 40 task.proc.invoke(task.input, config)
     41 # if successful, end
     42 break

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langgraph/utils/runnable.py:410, in RunnableSeq.invoke(self, input, config, **kwargs)
    408 context.run(_set_config_context, config)
    409 if i == 0:
--> 410     input = context.run(step.invoke, input, config, **kwargs)
    411 else:
    412     input = context.run(step.invoke, input, config)

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langgraph/utils/runnable.py:184, in RunnableCallable.invoke(self, input, config, **kwargs)
    182 else:
    183     context.run(_set_config_context, config)
--> 184     ret = context.run(self.func, input, **kwargs)
    185 if isinstance(ret, Runnable) and self.recurse:
    186     return ret.invoke(input, config)

Cell In[20], line 242, in update_todos(state, config, store)
    234 todo_extractor = create_extractor(
    235 model,
    236 tools=[ToDo],
    237 tool_choice=tool_name,
    238 enable_inserts=True
    239 ).with_listeners(on_end=spy)
    241 # Invoke the extractor
--> 242 result = todo_extractor.invoke({\"messages\": updated_messages, 
    243                                 \"existing\": existing_memories})
    245 # Save the memories from Trustcall to the store
    246 for r, rmeta in zip(result[\"responses\"], result[\"response_metadata\"]):

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langchain_core/runnables/base.py:5354, in RunnableBindingBase.invoke(self, input, config, **kwargs)
   5348 def invoke(
   5349     self,
   5350     input: Input,
   5351     config: Optional[RunnableConfig] = None,
   5352     **kwargs: Optional[Any],
   5353 ) -> Output:
-> 5354     return self.bound.invoke(
   5355         input,
   5356         self._merge_configs(config),
   5357         **{**self.kwargs, **kwargs},
   5358     )

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langchain_core/runnables/base.py:3024, in RunnableSequence.invoke(self, input, config, **kwargs)
   3022             input = context.run(step.invoke, input, config, **kwargs)
   3023         else:
-> 3024             input = context.run(step.invoke, input, config)
   3025 # finish the root run
   3026 except BaseException as e:

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langgraph/pregel/__init__.py:1844, in Pregel.invoke(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, **kwargs)
   1842 else:
   1843     chunks = []
-> 1844 for chunk in self.stream(
   1845     input,
   1846     config,
   1847     stream_mode=stream_mode,
   1848     output_keys=output_keys,
   1849     interrupt_before=interrupt_before,
   1850     interrupt_after=interrupt_after,
   1851     debug=debug,
   1852     **kwargs,
   1853 ):
   1854     if stream_mode == \"values\":
   1855         latest = chunk

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langgraph/pregel/__init__.py:1573, in Pregel.stream(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, subgraphs)
   1567     # Similarly to Bulk Synchronous Parallel / Pregel model
   1568     # computation proceeds in steps, while there are channel updates
   1569     # channel updates from step N are only visible in step N+1
   1570     # channels are guaranteed to be immutable for the duration of the step,
   1571     # with channel updates applied only at the transition between steps
   1572     while loop.tick(input_keys=self.input_channels):
-> 1573         for _ in runner.tick(
   1574             loop.tasks.values(),
   1575             timeout=self.step_timeout,
   1576             retry_policy=self.retry_policy,
   1577             get_waiter=get_waiter,
   1578         ):
   1579             # emit output
   1580             yield from output()
   1581 # emit output

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langgraph/pregel/runner.py:104, in PregelRunner.tick(self, tasks, reraise, timeout, retry_policy, get_waiter)
    102 t = tasks[0]
    103 try:
--> 104     run_with_retry(t, retry_policy, writer=writer)
    105     self.commit(t, None)
    106 except Exception as exc:

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langgraph/pregel/retry.py:40, in run_with_retry(task, retry_policy, writer)
     38 task.writes.clear()
     39 # run the task
---> 40 task.proc.invoke(task.input, config)
     41 # if successful, end
     42 break

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langgraph/utils/runnable.py:410, in RunnableSeq.invoke(self, input, config, **kwargs)
    408 context.run(_set_config_context, config)
    409 if i == 0:
--> 410     input = context.run(step.invoke, input, config, **kwargs)
    411 else:
    412     input = context.run(step.invoke, input, config)

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langgraph/utils/runnable.py:184, in RunnableCallable.invoke(self, input, config, **kwargs)
    182 else:
    183     context.run(_set_config_context, config)
--> 184     ret = context.run(self.func, input, **kwargs)
    185 if isinstance(ret, Runnable) and self.recurse:
    186     return ret.invoke(input, config)

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/trustcall/_base.py:880, in _Patch.invoke(self, state, config)
    879 def invoke(self, state: ExtendedExtractState, config: RunnableConfig) -> dict:
--> 880     msg = self.bound.invoke(state.messages, config)
    881     return self._tear_down(cast(AIMessage, msg), state.messages, state.tool_call_id)

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langchain_core/runnables/base.py:5354, in RunnableBindingBase.invoke(self, input, config, **kwargs)
   5348 def invoke(
   5349     self,
   5350     input: Input,
   5351     config: Optional[RunnableConfig] = None,
   5352     **kwargs: Optional[Any],
   5353 ) -> Output:
-> 5354     return self.bound.invoke(
   5355         input,
   5356         self._merge_configs(config),
   5357         **{**self.kwargs, **kwargs},
   5358     )

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:286, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
    275 def invoke(
    276     self,
    277     input: LanguageModelInput,
   (...)
    281     **kwargs: Any,
    282 ) -> BaseMessage:
    283     config = ensure_config(config)
    284     return cast(
    285         ChatGeneration,
--> 286         self.generate_prompt(
    287             [self._convert_input(input)],
    288             stop=stop,
    289             callbacks=config.get(\"callbacks\"),
    290             tags=config.get(\"tags\"),
    291             metadata=config.get(\"metadata\"),
    292             run_name=config.get(\"run_name\"),
    293             run_id=config.pop(\"run_id\", None),
    294             **kwargs,
    295         ).generations[0][0],
    296     ).message

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:786, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
    778 def generate_prompt(
    779     self,
    780     prompts: list[PromptValue],
   (...)
    783     **kwargs: Any,
    784 ) -> LLMResult:
    785     prompt_messages = [p.to_messages() for p in prompts]
--> 786     return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:643, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    641         if run_managers:
    642             run_managers[i].on_llm_error(e, response=LLMResult(generations=[]))
--> 643         raise e
    644 flattened_outputs = [
    645     LLMResult(generations=[res.generations], llm_output=res.llm_output)  # type: ignore[list-item]
    646     for res in results
    647 ]
    648 llm_output = self._combine_llm_outputs([res.llm_output for res in results])

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:633, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    630 for i, m in enumerate(messages):
    631     try:
    632         results.append(
--> 633             self._generate_with_cache(
    634                 m,
    635                 stop=stop,
    636                 run_manager=run_managers[i] if run_managers else None,
    637                 **kwargs,
    638             )
    639         )
    640     except BaseException as e:
    641         if run_managers:

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:851, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
    849 else:
    850     if inspect.signature(self._generate).parameters.get(\"run_manager\"):
--> 851         result = self._generate(
    852             messages, stop=stop, run_manager=run_manager, **kwargs
    853         )
    854     else:
    855         result = self._generate(messages, stop=stop, **kwargs)

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/langchain_openai/chat_models/base.py:689, in BaseChatOpenAI._generate(self, messages, stop, run_manager, **kwargs)
    687     generation_info = {\"headers\": dict(raw_response.headers)}
    688 else:
--> 689     response = self.client.create(**payload)
    690 return self._create_chat_result(response, generation_info)

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/openai/_utils/_utils.py:275, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
    273             msg = f\"Missing required argument: {quote(missing[0])}\"
    274     raise TypeError(msg)
--> 275 return func(*args, **kwargs)

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/openai/resources/chat/completions.py:829, in Completions.create(self, messages, model, audio, frequency_penalty, function_call, functions, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, modalities, n, parallel_tool_calls, prediction, presence_penalty, response_format, seed, service_tier, stop, store, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
    788 @required_args([\"messages\", \"model\"], [\"messages\", \"model\", \"stream\"])
    789 def create(
    790     self,
   (...)
    826     timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    827 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
    828     validate_response_format(response_format)
--> 829     return self._post(
    830         \"/chat/completions\",
    831         body=maybe_transform(
    832             {
    833                 \"messages\": messages,
    834                 \"model\": model,
    835                 \"audio\": audio,
    836                 \"frequency_penalty\": frequency_penalty,
    837                 \"function_call\": function_call,
    838                 \"functions\": functions,
    839                 \"logit_bias\": logit_bias,
    840                 \"logprobs\": logprobs,
    841                 \"max_completion_tokens\": max_completion_tokens,
    842                 \"max_tokens\": max_tokens,
    843                 \"metadata\": metadata,
    844                 \"modalities\": modalities,
    845                 \"n\": n,
    846                 \"parallel_tool_calls\": parallel_tool_calls,
    847                 \"prediction\": prediction,
    848                 \"presence_penalty\": presence_penalty,
    849                 \"response_format\": response_format,
    850                 \"seed\": seed,
    851                 \"service_tier\": service_tier,
    852                 \"stop\": stop,
    853                 \"store\": store,
    854                 \"stream\": stream,
    855                 \"stream_options\": stream_options,
    856                 \"temperature\": temperature,
    857                 \"tool_choice\": tool_choice,
    858                 \"tools\": tools,
    859                 \"top_logprobs\": top_logprobs,
    860                 \"top_p\": top_p,
    861                 \"user\": user,
    862             },
    863             completion_create_params.CompletionCreateParams,
    864         ),
    865         options=make_request_options(
    866             extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    867         ),
    868         cast_to=ChatCompletion,
    869         stream=stream or False,
    870         stream_cls=Stream[ChatCompletionChunk],
    871     )

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/openai/_base_client.py:1278, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
   1264 def post(
   1265     self,
   1266     path: str,
   (...)
   1273     stream_cls: type[_StreamT] | None = None,
   1274 ) -> ResponseT | _StreamT:
   1275     opts = FinalRequestOptions.construct(
   1276         method=\"post\", url=path, json_data=body, files=to_httpx_files(files), **options
   1277     )
-> 1278     return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/openai/_base_client.py:955, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
    952 else:
    953     retries_taken = 0
--> 955 return self._request(
    956     cast_to=cast_to,
    957     options=options,
    958     stream=stream,
    959     stream_cls=stream_cls,
    960     retries_taken=retries_taken,
    961 )

File ~/projects/langchain-academy/venv/lib/python3.12/site-packages/openai/_base_client.py:1059, in SyncAPIClient._request(self, cast_to, options, retries_taken, stream, stream_cls)
   1056         err.response.read()
   1058     log.debug(\"Re-raising status error\")
-> 1059     raise self._make_status_error_from_response(err.response) from None
   1061 return self._process_response(
   1062     cast_to=cast_to,
   1063     options=options,
   (...)
   1067     retries_taken=retries_taken,
   1068 )

BadRequestError: Error code: 400 - {'error': {'message': \"An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. The following tool_call_ids did not have response messages: call_scCoFzFQAedYvhaossU6hsjP, call_zoi06RPqwbp6PpoLbvycgrWu\", 'type': 'invalid_request_error', 'param': 'messages', 'code': None}}"
}
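For anyone debugging this: the frame at `Cell In[20], line 242` shows the failure originates in the Trustcall extractor inside the `update_todos` node, and the 400 is raised from Trustcall's `_Patch.invoke`, which re-sends its accumulated messages to the model. That matches the error text: an earlier extraction turn apparently made parallel tool calls that were never all answered. A cleaned-up sketch of the call site from the trace (`model`, `ToDo`, `tool_name`, `spy`, `updated_messages`, and `existing_memories` are defined earlier in the lesson notebook):

```python
from trustcall import create_extractor

# Trustcall extractor bound to the ToDo schema, with inserts enabled
# so several todos can be created or updated in a single turn.
todo_extractor = create_extractor(
    model,
    tools=[ToDo],
    tool_choice=tool_name,
    enable_inserts=True,
).with_listeners(on_end=spy)

# This is the invocation that ultimately produces the 400 when the
# extraction turn contains unanswered parallel tool calls.
result = todo_extractor.invoke(
    {"messages": updated_messages, "existing": existing_memories}
)
```

If that is indeed the root cause, it may be worth checking whether a newer `trustcall` release handles parallel tool calls during patching.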