I'm trying LangGraph on a research task, replacing the prompt in the "Hierarchical Agent Teams" notebook with my own. I'm running a copy of the notebook in VS Code on Windows 11.
The graph runs, and I can see the run in LangSmith, but after some amount of time it always fails with this error:
Error code 400: 'Web Scraper' does not match '^[a-zA-Z0-9_-]{1,64}$' - 'messages.3.name'
as part of this larger error trace:
{
"name": "BadRequestError",
"message": "Error code: 400 - {'error': {'message': \"'Web Scraper' does not match '^[a-zA-Z0-9_-]{1,64}$' - 'messages.3.name'\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"stack": "---------------------------------------------------------------------------
BadRequestError Traceback (most recent call last)
Cell In[15], line 1
----> 1 for s in super_graph.stream(
2 {
3 \"messages\": [
4 HumanMessage(
5 content=research_tasks[0]
6 )
7 ],
8 },
9 {\"recursion_limit\": 150},
10 ):
11 if \"__end__\" not in s:
12 print(s)
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langgraph\\pregel\\__init__.py:886, in Pregel.transform(self, input, config, output_keys, input_keys, interrupt_before_nodes, interrupt_after_nodes, debug, **kwargs)
874 def transform(
875 self,
876 input: Iterator[Union[dict[str, Any], Any]],
(...)
884 **kwargs: Any,
885 ) -> Iterator[Union[dict[str, Any], Any]]:
--> 886 for chunk in self._transform_stream_with_config(
887 input,
888 self._transform,
889 config,
890 output_keys=output_keys,
891 input_keys=input_keys,
892 interrupt_before_nodes=interrupt_before_nodes,
893 interrupt_after_nodes=interrupt_after_nodes,
894 debug=debug,
895 **kwargs,
896 ):
897 yield chunk
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langchain_core\\runnables\\base.py:1849, in Runnable._transform_stream_with_config(self, input, transformer, config, run_type, **kwargs)
1847 try:
1848 while True:
-> 1849 chunk: Output = context.run(next, iterator) # type: ignore
1850 yield chunk
1851 if final_output_supported:
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langgraph\\pregel\\__init__.py:572, in Pregel._transform(self, input, run_manager, config, **kwargs)
565 done, inflight = concurrent.futures.wait(
566 futures,
567 return_when=concurrent.futures.FIRST_EXCEPTION,
568 timeout=self.step_timeout,
569 )
571 # panic on failure or timeout
--> 572 _panic_or_proceed(done, inflight, step)
574 # apply writes to channels
575 _apply_writes(
576 checkpoint, channels, pending_writes, config, step + 1
577 )
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langgraph\\pregel\\__init__.py:990, in _panic_or_proceed(done, inflight, step)
988 inflight.pop().cancel()
989 # raise the exception
--> 990 raise exc
991 # TODO this is where retry of an entire step would happen
993 if inflight:
994 # if we got here means we timed out
File C:\\Program Files\\WindowsApps\\PythonSoftwareFoundation.Python.3.10_3.10.3056.0_x64__qbz5n2kfra8p0\\lib\\concurrent\\futures\\thread.py:58, in _WorkItem.run(self)
55 return
57 try:
---> 58 result = self.fn(*self.args, **self.kwargs)
59 except BaseException as exc:
60 self.future.set_exception(exc)
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langchain_core\\runnables\\base.py:4427, in RunnableBindingBase.invoke(self, input, config, **kwargs)
4421 def invoke(
4422 self,
4423 input: Input,
4424 config: Optional[RunnableConfig] = None,
4425 **kwargs: Optional[Any],
4426 ) -> Output:
-> 4427 return self.bound.invoke(
4428 input,
4429 self._merge_configs(config),
4430 **{**self.kwargs, **kwargs},
4431 )
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langchain_core\\runnables\\base.py:2415, in RunnableSequence.invoke(self, input, config)
2413 try:
2414 for i, step in enumerate(self.steps):
-> 2415 input = step.invoke(
2416 input,
2417 # mark each step as a child run
2418 patch_config(
2419 config, callbacks=run_manager.get_child(f\"seq:step:{i+1}\")
2420 ),
2421 )
2422 # finish the root run
2423 except BaseException as e:
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langgraph\\pregel\\__init__.py:838, in Pregel.invoke(self, input, config, output_keys, input_keys, interrupt_before_nodes, interrupt_after_nodes, debug, **kwargs)
825 def invoke(
826 self,
827 input: Union[dict[str, Any], Any],
(...)
835 **kwargs: Any,
836 ) -> Union[dict[str, Any], Any]:
837 latest: Union[dict[str, Any], Any] = None
--> 838 for chunk in self.stream(
839 input,
840 config,
841 output_keys=output_keys if output_keys is not None else self.output,
842 input_keys=input_keys,
843 interrupt_before_nodes=interrupt_before_nodes,
844 interrupt_after_nodes=interrupt_after_nodes,
845 debug=debug,
846 **kwargs,
847 ):
848 latest = chunk
849 return latest
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langgraph\\pregel\\__init__.py:886, in Pregel.transform(self, input, config, output_keys, input_keys, interrupt_before_nodes, interrupt_after_nodes, debug, **kwargs)
874 def transform(
875 self,
876 input: Iterator[Union[dict[str, Any], Any]],
(...)
884 **kwargs: Any,
885 ) -> Iterator[Union[dict[str, Any], Any]]:
--> 886 for chunk in self._transform_stream_with_config(
887 input,
888 self._transform,
889 config,
890 output_keys=output_keys,
891 input_keys=input_keys,
892 interrupt_before_nodes=interrupt_before_nodes,
893 interrupt_after_nodes=interrupt_after_nodes,
894 debug=debug,
895 **kwargs,
896 ):
897 yield chunk
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langchain_core\\runnables\\base.py:1849, in Runnable._transform_stream_with_config(self, input, transformer, config, run_type, **kwargs)
1847 try:
1848 while True:
-> 1849 chunk: Output = context.run(next, iterator) # type: ignore
1850 yield chunk
1851 if final_output_supported:
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langgraph\\pregel\\__init__.py:572, in Pregel._transform(self, input, run_manager, config, **kwargs)
565 done, inflight = concurrent.futures.wait(
566 futures,
567 return_when=concurrent.futures.FIRST_EXCEPTION,
568 timeout=self.step_timeout,
569 )
571 # panic on failure or timeout
--> 572 _panic_or_proceed(done, inflight, step)
574 # apply writes to channels
575 _apply_writes(
576 checkpoint, channels, pending_writes, config, step + 1
577 )
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langgraph\\pregel\\__init__.py:990, in _panic_or_proceed(done, inflight, step)
988 inflight.pop().cancel()
989 # raise the exception
--> 990 raise exc
991 # TODO this is where retry of an entire step would happen
993 if inflight:
994 # if we got here means we timed out
File C:\\Program Files\\WindowsApps\\PythonSoftwareFoundation.Python.3.10_3.10.3056.0_x64__qbz5n2kfra8p0\\lib\\concurrent\\futures\\thread.py:58, in _WorkItem.run(self)
55 return
57 try:
---> 58 result = self.fn(*self.args, **self.kwargs)
59 except BaseException as exc:
60 self.future.set_exception(exc)
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langchain_core\\runnables\\base.py:4427, in RunnableBindingBase.invoke(self, input, config, **kwargs)
4421 def invoke(
4422 self,
4423 input: Input,
4424 config: Optional[RunnableConfig] = None,
4425 **kwargs: Optional[Any],
4426 ) -> Output:
-> 4427 return self.bound.invoke(
4428 input,
4429 self._merge_configs(config),
4430 **{**self.kwargs, **kwargs},
4431 )
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langchain_core\\runnables\\base.py:2415, in RunnableSequence.invoke(self, input, config)
2413 try:
2414 for i, step in enumerate(self.steps):
-> 2415 input = step.invoke(
2416 input,
2417 # mark each step as a child run
2418 patch_config(
2419 config, callbacks=run_manager.get_child(f\"seq:step:{i+1}\")
2420 ),
2421 )
2422 # finish the root run
2423 except BaseException as e:
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langchain_core\\runnables\\base.py:4427, in RunnableBindingBase.invoke(self, input, config, **kwargs)
4421 def invoke(
4422 self,
4423 input: Input,
4424 config: Optional[RunnableConfig] = None,
4425 **kwargs: Optional[Any],
4426 ) -> Output:
-> 4427 return self.bound.invoke(
4428 input,
4429 self._merge_configs(config),
4430 **{**self.kwargs, **kwargs},
4431 )
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langchain_core\\language_models\\chat_models.py:153, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
142 def invoke(
143 self,
144 input: LanguageModelInput,
(...)
148 **kwargs: Any,
149 ) -> BaseMessage:
150 config = ensure_config(config)
151 return cast(
152 ChatGeneration,
--> 153 self.generate_prompt(
154 [self._convert_input(input)],
155 stop=stop,
156 callbacks=config.get(\"callbacks\"),
157 tags=config.get(\"tags\"),
158 metadata=config.get(\"metadata\"),
159 run_name=config.get(\"run_name\"),
160 **kwargs,
161 ).generations[0][0],
162 ).message
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langchain_core\\language_models\\chat_models.py:546, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
538 def generate_prompt(
539 self,
540 prompts: List[PromptValue],
(...)
543 **kwargs: Any,
544 ) -> LLMResult:
545 prompt_messages = [p.to_messages() for p in prompts]
--> 546 return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langchain_core\\language_models\\chat_models.py:407, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
405 if run_managers:
406 run_managers[i].on_llm_error(e, response=LLMResult(generations=[]))
--> 407 raise e
408 flattened_outputs = [
409 LLMResult(generations=[res.generations], llm_output=res.llm_output) # type: ignore[list-item]
410 for res in results
411 ]
412 llm_output = self._combine_llm_outputs([res.llm_output for res in results])
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langchain_core\\language_models\\chat_models.py:397, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
394 for i, m in enumerate(messages):
395 try:
396 results.append(
--> 397 self._generate_with_cache(
398 m,
399 stop=stop,
400 run_manager=run_managers[i] if run_managers else None,
401 **kwargs,
402 )
403 )
404 except BaseException as e:
405 if run_managers:
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langchain_core\\language_models\\chat_models.py:589, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
585 raise ValueError(
586 \"Asked to cache, but no cache found at `langchain.cache`.\"
587 )
588 if inspect.signature(self._generate).parameters.get(\"run_manager\"):
--> 589 result = self._generate(
590 messages, stop=stop, run_manager=run_manager, **kwargs
591 )
592 else:
593 result = self._generate(messages, stop=stop, **kwargs)
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\langchain_openai\\chat_models\\base.py:484, in ChatOpenAI._generate(self, messages, stop, run_manager, stream, **kwargs)
478 message_dicts, params = self._create_message_dicts(messages, stop)
479 params = {
480 **params,
481 **({\"stream\": stream} if stream is not None else {}),
482 **kwargs,
483 }
--> 484 response = self.client.create(messages=message_dicts, **params)
485 return self._create_chat_result(response)
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\openai\\_utils\\_utils.py:275, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
273 msg = f\"Missing required argument: {quote(missing[0])}\"
274 raise TypeError(msg)
--> 275 return func(*args, **kwargs)
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\openai\\resources\\chat\\completions.py:663, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, presence_penalty, response_format, seed, stop, stream, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
611 @required_args([\"messages\", \"model\"], [\"messages\", \"model\", \"stream\"])
612 def create(
613 self,
(...)
661 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
662 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
--> 663 return self._post(
664 \"/chat/completions\",
665 body=maybe_transform(
666 {
667 \"messages\": messages,
668 \"model\": model,
669 \"frequency_penalty\": frequency_penalty,
670 \"function_call\": function_call,
671 \"functions\": functions,
672 \"logit_bias\": logit_bias,
673 \"logprobs\": logprobs,
674 \"max_tokens\": max_tokens,
675 \"n\": n,
676 \"presence_penalty\": presence_penalty,
677 \"response_format\": response_format,
678 \"seed\": seed,
679 \"stop\": stop,
680 \"stream\": stream,
681 \"temperature\": temperature,
682 \"tool_choice\": tool_choice,
683 \"tools\": tools,
684 \"top_logprobs\": top_logprobs,
685 \"top_p\": top_p,
686 \"user\": user,
687 },
688 completion_create_params.CompletionCreateParams,
689 ),
690 options=make_request_options(
691 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
692 ),
693 cast_to=ChatCompletion,
694 stream=stream or False,
695 stream_cls=Stream[ChatCompletionChunk],
696 )
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\openai\\_base_client.py:1200, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
1186 def post(
1187 self,
1188 path: str,
(...)
1195 stream_cls: type[_StreamT] | None = None,
1196 ) -> ResponseT | _StreamT:
1197 opts = FinalRequestOptions.construct(
1198 method=\"post\", url=path, json_data=body, files=to_httpx_files(files), **options
1199 )
-> 1200 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\openai\\_base_client.py:889, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
880 def request(
881 self,
882 cast_to: Type[ResponseT],
(...)
887 stream_cls: type[_StreamT] | None = None,
888 ) -> ResponseT | _StreamT:
--> 889 return self._request(
890 cast_to=cast_to,
891 options=options,
892 stream=stream,
893 stream_cls=stream_cls,
894 remaining_retries=remaining_retries,
895 )
File ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\openai\\_base_client.py:980, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
977 err.response.read()
979 log.debug(\"Re-raising status error\")
--> 980 raise self._make_status_error_from_response(err.response) from None
982 return self._process_response(
983 cast_to=cast_to,
984 options=options,
(...)
987 stream_cls=stream_cls,
988 )
BadRequestError: Error code: 400 - {'error': {'message': \"'Web Scraper' does not match '^[a-zA-Z0-9_-]{1,64}$' - 'messages.3.name'\", 'type': 'invalid_request_error', 'param': None, 'code': None}}"
}
I can't find the regex in the notebook, so it must live somewhere else in the stack. The notebook puts a space in the agent name 'Web Scraper', which appears to be what triggers the error. When I search and replace inside the notebook so that all agent names omit spaces, the error goes away. If this validation is in LangGraph, it should be patched to accept spaces in agent names. If it is not, the example notebooks should be patched to omit spaces in agent names.
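For anyone else hitting this, here is a minimal sketch of the workaround, assuming the API enforces exactly the regex quoted in the error message; sanitize_agent_name is a hypothetical helper of mine, not something from the notebook or LangGraph:

import re

# Pattern quoted in the 400 error: the message `name` field may only contain
# letters, digits, underscores, and hyphens, and must be 1-64 characters long.
NAME_PATTERN = re.compile(r"^[a-zA-Z0-9_-]{1,64}$")

def sanitize_agent_name(name: str) -> str:
    # Replace every disallowed character (including spaces) with an underscore,
    # then truncate to the 64-character limit.
    safe = re.sub(r"[^a-zA-Z0-9_-]", "_", name)[:64]
    assert NAME_PATTERN.fullmatch(safe), f"could not sanitize {name!r}"
    return safe

print(sanitize_agent_name("Web Scraper"))  # -> Web_Scraper

Applying this to every agent name before building the graph is equivalent to the search and replace I did by hand.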