cohere-ai / cohere-python

Python Library for Accessing the Cohere API
https://docs.cohere.ai
MIT License
278 stars 60 forks source link

Error passing tool outputs to model #525

Closed com3dian closed 2 months ago

com3dian commented 2 months ago

Issue

I'm following the Tool calling tutorial in langchain with Cohere LLM. I encountered the ApiError: status_code: 400, body: {'message': 'invalid request: cannot specify both message and tool_results in multistep mode'} error. This error does not occur when running the same code with other LLMs (e.g., OpenAI).

Code to reproduce

from langchain_core.prompts import ChatPromptTemplate
from langchain_cohere import ChatCohere
from langchain_core.messages import HumanMessage, ToolMessage

from langchain.tools import tool
from math import pi

# NOTE(review): with langchain's @tool decorator, the function name, the
# docstring, and the typed parameters are presumably exported to the model
# as the tool's schema/description — changing them would change what the
# LLM sees, so they are left exactly as reported. TODO confirm against
# langchain `@tool` docs.
@tool
def add(a: int, b: int) -> int:
    """Adds a and b.

    Args:
        a: first int
        b: second int
    """
    return a + b

# Tools made available to the model; only the single `add` tool is registered.
tools = [add]

# Build the Cohere chat model and bind the tool so it can emit tool calls.
llm = ChatCohere(cohere_api_key='<my_api_key>')
llm_with_tools = llm.bind_tools(tools)

# First invocation: the model responds with a tool call (see AIMessage below)
# rather than a final answer.
query = "What is the result of 1347 plus 5?"
messages = [HumanMessage(query)]
ai_msg = llm_with_tools.invoke(messages)
messages.append(ai_msg)

# Execute each tool call the model requested and append the result to the
# history as a ToolMessage keyed by the call's id.
for tool_call in ai_msg.tool_calls:
    selected_tool = {"add": add}[tool_call["name"].lower()]
    tool_output = selected_tool.invoke(tool_call["args"])
    messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"]))

# Second invocation with the tool result in the history — this is the call
# that raises the 400 ApiError quoted below.
llm_with_tools.invoke(messages)

-------------------------------------------------
ApiError: 
status_code: 400, 
body: {'message': 'invalid request: cannot specify both message and tool_results in multistep mode'}

Variable

print(messages)

-------------------------------------------------
[HumanMessage(content='What is the result of 1347 plus 5?'), 
 AIMessage(content="I will use the 'add' tool to calculate the answer.", 
           additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '41a24915-5df3-4dcb-888d-e35896a63c8e', 'tool_calls': [{'id': '194c1e1374a542b1804d372ba7060d2b', 'function': {'name': 'add', 'arguments': '{"a": 1347, "b": 5}'}, 'type': 'function'}], 'token_count': {'input_tokens': 920, 'output_tokens': 26}}, 
           response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '41a24915-5df3-4dcb-888d-e35896a63c8e', 'tool_calls': [{'id': '194c1e1374a542b1804d372ba7060d2b', 'function': {'name': 'add', 'arguments': '{"a": 1347, "b": 5}'}, 'type': 'function'}], 'token_count': {'input_tokens': 920, 'output_tokens': 26}}, id='run-f9e5f903-1375-40fa-bc20-d02b13bf2dea-0', 
           tool_calls=[{'name': 'add', 'args': {'a': 1347, 'b': 5}, 'id': '99d39a087d71463f80f543997c5c1c01'}]), 
 ToolMessage(content='1352', tool_call_id='99d39a087d71463f80f543997c5c1c01')]

System Info

System: linux

Python 3.10.12

cohere                        5.5.6

langchain                     0.2.2
langchain-cohere              0.1.5
langchain-community           0.0.38
langchain-core                0.2.4
langchain-openai              0.1.6
langchain-text-splitters      0.2.1
langsmith                     0.1.71
com3dian commented 2 months ago

The full log

---------------------------------------------------------------------------
ApiError                                  Traceback (most recent call last)
Cell In[8], line 61
     58     tool_output = selected_tool.invoke(tool_call["args"])
     59     messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"]))
---> 61 llm_with_tools.invoke(messages)

File ~/.local/lib/python3.10/site-packages/langchain_core/runnables/base.py:4444, in RunnableBindingBase.invoke(self, input, config, **kwargs)
   4438 def invoke(
   4439     self,
   4440     input: Input,
   4441     config: Optional[RunnableConfig] = None,
   4442     **kwargs: Optional[Any],
   4443 ) -> Output:
-> 4444     return self.bound.invoke(
   4445         input,
   4446         self._merge_configs(config),
   4447         **{**self.kwargs, **kwargs},
   4448     )

File ~/.local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:170, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
    159 def invoke(
    160     self,
    161     input: LanguageModelInput,
   (...)
    165     **kwargs: Any,
    166 ) -> BaseMessage:
    167     config = ensure_config(config)
    168     return cast(
    169         ChatGeneration,
--> 170         self.generate_prompt(
    171             [self._convert_input(input)],
    172             stop=stop,
    173             callbacks=config.get("callbacks"),
    174             tags=config.get("tags"),
    175             metadata=config.get("metadata"),
    176             run_name=config.get("run_name"),
    177             run_id=config.pop("run_id", None),
    178             **kwargs,
    179         ).generations[0][0],
    180     ).message

File ~/.local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:599, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
    591 def generate_prompt(
    592     self,
    593     prompts: List[PromptValue],
   (...)
    596     **kwargs: Any,
    597 ) -> LLMResult:
    598     prompt_messages = [p.to_messages() for p in prompts]
--> 599     return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

File ~/.local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:456, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    454         if run_managers:
    455             run_managers[i].on_llm_error(e, response=LLMResult(generations=[]))
--> 456         raise e
    457 flattened_outputs = [
    458     LLMResult(generations=[res.generations], llm_output=res.llm_output)  # type: ignore[list-item]
    459     for res in results
    460 ]
    461 llm_output = self._combine_llm_outputs([res.llm_output for res in results])

File ~/.local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:446, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    443 for i, m in enumerate(messages):
    444     try:
    445         results.append(
--> 446             self._generate_with_cache(
    447                 m,
    448                 stop=stop,
    449                 run_manager=run_managers[i] if run_managers else None,
    450                 **kwargs,
    451             )
    452         )
    453     except BaseException as e:
    454         if run_managers:

File ~/.local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:671, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
    669 else:
    670     if inspect.signature(self._generate).parameters.get("run_manager"):
--> 671         result = self._generate(
    672             messages, stop=stop, run_manager=run_manager, **kwargs
    673         )
    674     else:
    675         result = self._generate(messages, stop=stop, **kwargs)

File ~/.local/lib/python3.10/site-packages/langchain_cohere/chat_models.py:433, in ChatCohere._generate(self, messages, stop, run_manager, **kwargs)
    428     return generate_from_stream(stream_iter)
    430 request = get_cohere_chat_request(
    431     messages, stop_sequences=stop, **self._default_params, **kwargs
    432 )
--> 433 response = self.client.chat(**request)
    435 generation_info = self._get_generation_info(response)
    436 if "tool_calls" in generation_info:

File ~/.local/lib/python3.10/site-packages/cohere/client.py:33, in validate_args.<locals>.wrapped(*args, **kwargs)
     31 def wrapped(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
     32     check_fn(*args, **kwargs)
---> 33     return method(*args, **kwargs)

File ~/.local/lib/python3.10/site-packages/cohere/base_client.py:799, in BaseCohere.chat(self, message, model, preamble, chat_history, conversation_id, prompt_truncation, connectors, search_queries_only, documents, citation_quality, temperature, max_tokens, max_input_tokens, k, p, seed, stop_sequences, frequency_penalty, presence_penalty, raw_prompting, return_prompt, tools, tool_results, force_single_step, request_options)
    797 except JSONDecodeError:
    798     raise ApiError(status_code=_response.status_code, body=_response.text)
--> 799 raise ApiError(status_code=_response.status_code, body=_response_json)

ApiError: status_code: 400, body: {'message': 'invalid request: cannot specify both message and tool_results in multistep mode'}
Anirudh31415926535 commented 2 months ago

Hi @com3dian Thanks a lot for the question! This error is due to a cohere api update that we had recently! Please update the langchain-cohere package to use version 0.1.7, and ensure that the cohere version is >= 5.5.6.

That should help to resolve the issue you're currently facing!

com3dian commented 2 months ago

Thank you! It is fixed:) @Anirudh31415926535