Open Ruhil-DS opened 3 months ago
Do not call agent.invoke directly; instead, wrap the agent in an executor and invoke that:
agent_executor = AgentExecutor(agent=agent, tools=tools)
result = agent_executor.invoke({"input": "some question?"})
@liugddx I'm new to LangChain. Please help me with a little more details.
I saw that initialize_agent
has been deprecated.
The AgentExecutor you mentioned, needs an agent as a parameter. How else can I create this agent?
I tried it with the agent created using initialize_agent
and ended up getting an error:
from langchain.agents import AgentExecutor
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "what is the latest MacOS"})
TypeError: Agent.plan() got multiple values for argument 'intermediate_steps'
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[15], line 1
----> 1 agent_executor.invoke({"input": "what is the latest MacOS"})
File ~/anaconda3/lib/python3.11/site-packages/langchain/chains/base.py:163, in Chain.invoke(self, input, config, **kwargs)
161 except BaseException as e:
162 run_manager.on_chain_error(e)
--> 163 raise e
164 run_manager.on_chain_end(outputs)
166 if include_run_info:
File ~/anaconda3/lib/python3.11/site-packages/langchain/chains/base.py:153, in Chain.invoke(self, input, config, **kwargs)
150 try:
151 self._validate_inputs(inputs)
152 outputs = (
--> 153 self._call(inputs, run_manager=run_manager)
154 if new_arg_supported
155 else self._call(inputs)
156 )
158 final_outputs: Dict[str, Any] = self.prep_outputs(
159 inputs, outputs, return_only_outputs
160 )
161 except BaseException as e:
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1432, in AgentExecutor._call(self, inputs, run_manager)
1430 # We now enter the agent loop (until it returns something).
1431 while self._should_continue(iterations, time_elapsed):
-> 1432 next_step_output = self._take_next_step(
1433 name_to_tool_map,
1434 color_mapping,
1435 inputs,
1436 intermediate_steps,
1437 run_manager=run_manager,
1438 )
1439 if isinstance(next_step_output, AgentFinish):
1440 return self._return(
1441 next_step_output, intermediate_steps, run_manager=run_manager
1442 )
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1138, in AgentExecutor._take_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
1129 def _take_next_step(
1130 self,
1131 name_to_tool_map: Dict[str, BaseTool],
(...)
1135 run_manager: Optional[CallbackManagerForChainRun] = None,
1136 ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
1137 return self._consume_next_step(
-> 1138 [
1139 a
1140 for a in self._iter_next_step(
1141 name_to_tool_map,
1142 color_mapping,
1143 inputs,
1144 intermediate_steps,
1145 run_manager,
1146 )
1147 ]
1148 )
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1138, in <listcomp>(.0)
1129 def _take_next_step(
1130 self,
1131 name_to_tool_map: Dict[str, BaseTool],
(...)
1135 run_manager: Optional[CallbackManagerForChainRun] = None,
1136 ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
1137 return self._consume_next_step(
-> 1138 [
1139 a
1140 for a in self._iter_next_step(
1141 name_to_tool_map,
1142 color_mapping,
1143 inputs,
1144 intermediate_steps,
1145 run_manager,
1146 )
1147 ]
1148 )
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1166, in AgentExecutor._iter_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
1163 intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
1165 # Call the LLM to see what to do.
-> 1166 output = self.agent.plan(
1167 intermediate_steps,
1168 callbacks=run_manager.get_child() if run_manager else None,
1169 **inputs,
1170 )
1171 except OutputParserException as e:
1172 if isinstance(self.handle_parsing_errors, bool):
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:397, in RunnableAgent.plan(self, intermediate_steps, callbacks, **kwargs)
389 final_output: Any = None
390 if self.stream_runnable:
391 # Use streaming to make sure that the underlying LLM is invoked in a
392 # streaming
(...)
395 # Because the response from the plan is not a generator, we need to
396 # accumulate the output into final output and return that.
--> 397 for chunk in self.runnable.stream(inputs, config={"callbacks": callbacks}):
398 if final_output is None:
399 final_output = chunk
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1571, in AgentExecutor.stream(self, input, config, **kwargs)
1560 config = ensure_config(config)
1561 iterator = AgentExecutorIterator(
1562 self,
1563 input,
(...)
1569 **kwargs,
1570 )
-> 1571 for step in iterator:
1572 yield step
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent_iterator.py:174, in AgentExecutorIterator.__iter__(self)
168 while self.agent_executor._should_continue(
169 self.iterations, self.time_elapsed
170 ):
171 # take the next step: this plans next action, executes it,
172 # yielding action and observation as they are generated
173 next_step_seq: NextStepOutput = []
--> 174 for chunk in self.agent_executor._iter_next_step(
175 self.name_to_tool_map,
176 self.color_mapping,
177 self.inputs,
178 self.intermediate_steps,
179 run_manager,
180 ):
181 next_step_seq.append(chunk)
182 # if we're yielding actions, yield them as they come
183 # do not yield AgentFinish, which will be handled below
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1166, in AgentExecutor._iter_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
1163 intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
1165 # Call the LLM to see what to do.
-> 1166 output = self.agent.plan(
1167 intermediate_steps,
1168 callbacks=run_manager.get_child() if run_manager else None,
1169 **inputs,
1170 )
1171 except OutputParserException as e:
1172 if isinstance(self.handle_parsing_errors, bool):
TypeError: Agent.plan() got multiple values for argument 'intermediate_steps'
@Ruhil-DS Try creating your agent as shown below, using a ChatPromptTemplate for the question:
# Build an OpenAI-functions agent with LCEL (replacement for the deprecated
# initialize_agent helper), then wrap it in an AgentExecutor.
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.utils.function_calling import convert_to_openai_function

# Bind the OpenAI function *schemas* to the model — passing the raw Tool
# objects to functions= is not valid; they must be converted first.
llm_with_tools = llm.bind(
    functions=[convert_to_openai_function(t) for t in tools]
)

prompt = ChatPromptTemplate.from_messages(
    [
        # Uncomment to carry conversation history between turns.
        # MessagesPlaceholder(variable_name="chat_history"),
        ("user", "{input}"),
        # The executor fills this placeholder with the (action, observation)
        # pairs accumulated so far in the agent loop.
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)

# Runnable agent: map executor inputs -> prompt -> model -> parsed action.
agent = (
    {
        "input": lambda x: x["input"],
        "agent_scratchpad": lambda x: format_to_openai_function_messages(
            x["intermediate_steps"]
        ),
    }
    | prompt
    | llm_with_tools
    | OpenAIFunctionsAgentOutputParser()
)

agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
result = agent_executor.invoke({"input": "ASK YOUR QUESTION HERE ???"})
print(result)
Checked other resources
Example Code
Error Message and Stack Trace (if applicable)
Final error
Stack Trace
Description
So, depending on the question, sometimes there is an error (mentioned above) and sometimes the agent runs without any issues.
I'll be sure to give you 2 questions, one for each case.
Working example
output
(some summary content follows the screenshot)
Note: See how the JSON Action output has 2 keys:
Non-working example
output
This, followed by the error message mentioned above.
Note: See how the JSON Action output has 2 keys but different from what was above:
System Info
System Information
pip freeze | grep langchain
python -m langchain_core.sys_info
System: MacBook Air (M1 chip)