langchain-ai / langchain


tool_calling_agent with empty tools list is not working #22467

Open ZeroTimo opened 1 month ago

ZeroTimo commented 1 month ago

Checked other resources

Example Code

from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
    PromptTemplate,
    SystemMessagePromptTemplate,
)

question_prompt = """You are an expert in process modeling and Petri Nets. Your task is to formulate questions based on a provided process description."""

prompt_question = ChatPromptTemplate.from_messages([
    SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], template=question_prompt)),
    MessagesPlaceholder(variable_name='chat_history', optional=True),
    HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], template='{input}')),
    MessagesPlaceholder(variable_name='agent_scratchpad', optional=True),
])

question_agent = create_tool_calling_agent(llm, [], prompt_question)
question_agent_executor = AgentExecutor(agent=question_agent, tools=[], verbose=True)

response = question_agent_executor.invoke({"input": message})

Error Message and Stack Trace (if applicable)

---------------------------------------------------------------------------
BadRequestError                           Traceback (most recent call last)

Cell In[8], line 5
      process_description = """A customer brings in a defective computer and the CRS checks the defect and hands out a repair cost calculation back. If the customer decides that the costs are acceptable, the process continues otherwise she takes her computer home unrepaired. The ongoing repair consists of two activities which are executed in an arbitrary order. The first activity is to check and repair the hardware, whereas the second activity checks and configures the software. After each of these activities, the proper system functionality is tested. If an error is detected, another arbitrary repair activity is executed; otherwise, the repair is finished."""
      user_input = {"messages": process_description}
----> for s in graph.stream(
          {"process_description": [HumanMessage(content=process_description)]},
          {"recursion_limit": 14},
      ):
          if "end" not in s:
              print(s)

File /Applications/anaconda3/lib/python3.11/site-packages/langgraph/pregel/__init__.py:686, in Pregel.stream(self, input, config, stream_mode, output_keys, input_keys, interrupt_before_nodes, interrupt_after_nodes, debug)
----> _panic_or_proceed(done, inflight, step)

File /Applications/anaconda3/lib/python3.11/site-packages/langgraph/pregel/__init__.py:1033, in _panic_or_proceed(done, inflight, step)
----> raise exc

File /Applications/anaconda3/lib/python3.11/concurrent/futures/thread.py:58, in _WorkItem.run(self)
----> result = self.fn(*self.args, **self.kwargs)

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/runnables/base.py:2399, in RunnableSequence.invoke(self, input, config)
----> input = step.invoke(input, patch_config(config, callbacks=run_manager.get_child(f"seq:step:{i+1}")))

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/runnables/base.py:3863, in RunnableLambda.invoke(self, input, config, **kwargs)
----> return self._call_with_config(self._invoke, input, self._config(config, self.func), **kwargs)

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/runnables/base.py:1509, in Runnable._call_with_config(self, func, input, config, run_type, **kwargs)
----> output = cast(Output, context.run(call_func_with_variable_args, func, input, config, run_manager, **kwargs))

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/runnables/config.py:365, in call_func_with_variable_args(func, input, config, run_manager, **kwargs)
----> return func(input, **kwargs)

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/runnables/base.py:3737, in RunnableLambda._invoke(self, input, run_manager, config, **kwargs)
----> output = call_func_with_variable_args(self.func, input, config, run_manager, **kwargs)

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/runnables/config.py:365, in call_func_with_variable_args(func, input, config, run_manager, **kwargs)
----> return func(input, **kwargs)

Cell In[6], line 84, in generateQuestions(state)
      process_description = messages[-1]
      # Invoke the solution executor with a dictionary containing 'input'
----> response = question_agent_executor.invoke({"input": process_description})

File /Applications/anaconda3/lib/python3.11/site-packages/langchain/chains/base.py:166, in Chain.invoke(self, input, config, **kwargs)
----> raise e

File /Applications/anaconda3/lib/python3.11/site-packages/langchain/chains/base.py:156, in Chain.invoke(self, input, config, **kwargs)
----> outputs = (self._call(inputs, run_manager=run_manager) if new_arg_supported else self._call(inputs))

File /Applications/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1433, in AgentExecutor._call(self, inputs, run_manager)
----> next_step_output = self._take_next_step(name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager=run_manager)

File /Applications/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1139, in AgentExecutor._take_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
----> return self._consume_next_step([a for a in self._iter_next_step(name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)])

File /Applications/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1139, in <listcomp>(.0)
----> return self._consume_next_step([a for a in self._iter_next_step(name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)])

File /Applications/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1167, in AgentExecutor._iter_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
----> output = self.agent.plan(intermediate_steps, callbacks=run_manager.get_child() if run_manager else None, **inputs)

File /Applications/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:515, in RunnableMultiActionAgent.plan(self, intermediate_steps, callbacks, **kwargs)
----> for chunk in self.runnable.stream(inputs, config={"callbacks": callbacks}):

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/runnables/base.py:2775, in RunnableSequence.stream(self, input, config, **kwargs)
----> yield from self.transform(iter([input]), config, **kwargs)

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/runnables/base.py:2762, in RunnableSequence.transform(self, input, config, **kwargs)
----> yield from self._transform_stream_with_config(input, self._transform, patch_config(config, run_name=(config or {}).get("run_name") or self.name), **kwargs)

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/runnables/base.py:1778, in Runnable._transform_stream_with_config(self, input, transformer, config, run_type, **kwargs)
----> chunk: Output = context.run(next, iterator)

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/runnables/base.py:2726, in RunnableSequence._transform(self, input, run_manager, config)
----> for output in final_pipeline:

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/runnables/base.py:1154, in Runnable.transform(self, input, config, **kwargs)
----> for ichunk in input:

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/runnables/base.py:4644, in RunnableBindingBase.transform(self, input, config, **kwargs)
----> yield from self.bound.transform(input, self._merge_configs(config), **{**self.kwargs, **kwargs})

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/runnables/base.py:1172, in Runnable.transform(self, input, config, **kwargs)
----> yield from self.stream(final, config, **kwargs)

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:265, in BaseChatModel.stream(self, input, config, stop, **kwargs)
----> raise e

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:245, in BaseChatModel.stream(self, input, config, stop, **kwargs)
----> for chunk in self._stream(messages, stop=stop, **kwargs):

File /Applications/anaconda3/lib/python3.11/site-packages/langchain_openai/chat_models/base.py:441, in ChatOpenAI._stream(self, messages, stop, run_manager, **kwargs)
----> for chunk in self.client.create(messages=message_dicts, **params):

File /Applications/anaconda3/lib/python3.11/site-packages/openai/_utils/_utils.py:277, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
----> return func(*args, **kwargs)

File /Applications/anaconda3/lib/python3.11/site-packages/openai/resources/chat/completions.py:581, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, presence_penalty, response_format, seed, stop, stream, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
----> return self._post("/chat/completions", body=maybe_transform({"messages": messages, "model": model, "frequency_penalty": frequency_penalty, "function_call": function_call, "functions": functions, "logit_bias": logit_bias, "logprobs": logprobs, "max_tokens": max_tokens, "n": n, "presence_penalty": presence_penalty, "response_format": response_format, "seed": seed, "stop": stop, "stream": stream, "temperature": temperature, "tool_choice": tool_choice, "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user}, completion_create_params.CompletionCreateParams), options=make_request_options(extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout), cast_to=ChatCompletion, stream=stream or False, stream_cls=Stream[ChatCompletionChunk])

File /Applications/anaconda3/lib/python3.11/site-packages/openai/_base_client.py:1232, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
----> return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))

File /Applications/anaconda3/lib/python3.11/site-packages/openai/_base_client.py:921, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
----> return self._request(cast_to=cast_to, options=options, stream=stream, stream_cls=stream_cls, remaining_retries=remaining_retries)

File /Applications/anaconda3/lib/python3.11/site-packages/openai/_base_client.py:1012, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
----> raise self._make_status_error_from_response(err.response) from None

BadRequestError: Error code: 400 - {'error': {'message': "Invalid 'tools': empty array. Expected an array with minimum length 1, but got an empty array instead.", 'type': 'invalid_request_error', 'param': 'tools', 'code': 'empty_array'}}

Description

I am trying to use an agent with an empty tools list. If I use the same code with an open-source LLM it works, but with an OpenAI LLM I get the error above.
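For reference, here is a minimal sketch that seems to reproduce the same 400 without any agent machinery. My assumption (from reading the trace) is that create_tool_calling_agent binds the tools list via llm.bind_tools, so the empty list ends up as "tools": [] in the Chat Completions request, which the OpenAI API rejects. The model name below is just an example:

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo")  # example model, not the one from my setup

# Plain call with no tools bound: works fine.
print(llm.invoke("Say hello").content)

# Binding an empty tools list sends "tools": [] to the API and (on my setup)
# raises the same BadRequestError: empty_array.
print(llm.bind_tools([]).invoke("Say hello").content)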

System Info

platform: mac
Python: 3.10.2

eyurtsev commented 1 month ago

What do you expect the agent to do? Without tools, isn't the agent equivalent to just a chatbot?

ZeroTimo commented 1 month ago

I want the agent to do a complex task. I read in some papers that agents are better at "thinking" and reasoning through their answers than chatbots, even if you don't provide the agent with any tools.

keenborder786 commented 1 month ago

Just do the following, since you only want the chatbot functionality. The agent is designed to be used with tools.

from langchain_community.chat_models.openai import ChatOpenAI
from langchain.prompts import PromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate

llm = ChatOpenAI()
prompt_question = ChatPromptTemplate.from_messages([
    # 'question' must be declared as an input variable so it gets filled at invoke time.
    SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=['question'], template="Question: {question}")),
    MessagesPlaceholder(variable_name='chat_history', optional=True),
])

chain = prompt_question | llm

print(chain.invoke({'question': 'how are you doing?', 'chat_history': []}))
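If you want to keep the agent code path for the cases where you do have tools, one possible adaptation (an untested sketch, reusing llm and the prompt_question from the original issue above, i.e. the one with the {input} variable and the agent_scratchpad placeholder, not the simplified prompt here) is to only build the agent when the tools list is non-empty and fall back to a plain prompt | llm chain otherwise:

from langchain.agents import AgentExecutor, create_tool_calling_agent

tools = []  # whatever tools are actually available

if tools:
    # Normal tool-calling agent when there is at least one tool.
    agent = create_tool_calling_agent(llm, tools, prompt_question)
    runnable = AgentExecutor(agent=agent, tools=tools, verbose=True)
else:
    # No tools: avoid sending an empty tools array to the API and
    # run the prompt through the model directly.
    runnable = prompt_question | llm

# Note: AgentExecutor returns a dict with an "output" key, while the plain chain
# returns an AIMessage, so downstream code needs to handle both shapes.
response = runnable.invoke({"input": "Formulate questions for the given process description."})  # example input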