hinthornw / trustcall

Tenacious tool calling built on LangGraph
MIT License

ChatBedrockConverse compatibility issue #7

Open · arthberman opened this issue 1 week ago

arthberman commented 1 week ago

It works fine with ChatOpenAI, but it doesn't seem to be compatible with ChatBedrockConverse. Any help with that?
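Roughly, the setup looks like this (the model ID and schema below are placeholders I've filled in for illustration, not the exact code):

from langchain_aws import ChatBedrockConverse
from pydantic import BaseModel
from trustcall import create_extractor

class UserPreferences(BaseModel):
    """Preferences extracted from the conversation."""
    favorite_color: str

# Placeholder model ID; in my app this runs inside a LangGraph node,
# hence the pregel frames in the traceback below.
llm = ChatBedrockConverse(model="anthropic.claude-3-sonnet-20240229-v1:0")

# create_extractor binds the tools immediately, which is where the
# ValueError below is raised for ChatBedrockConverse (ChatOpenAI is fine).
extractor = create_extractor(llm, tools=[UserPreferences], tool_choice="UserPreferences")
result = extractor.invoke({"messages": [("user", "I love the color blue.")]})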

 File "/usr/local/lib/python3.12/site-packages/langgraph/pregel/__init__.py", line 1477, in astream
    async for _ in runner.atick(
  File "/usr/local/lib/python3.12/site-packages/langgraph/pregel/runner.py", line 194, in atick
    _panic_or_proceed(
  File "/usr/local/lib/python3.12/site-packages/langgraph/pregel/runner.py", line 273, in _panic_or_proceed
    raise exc
  File "/usr/local/lib/python3.12/site-packages/langgraph/pregel/retry.py", line 102, in arun_with_retry
    await task.proc.ainvoke(task.input, config)
  File "/usr/local/lib/python3.12/site-packages/langgraph/utils/runnable.py", line 453, in ainvoke
    input = await asyncio.create_task(coro, context=context)
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/site-packages/langgraph/pregel/__init__.py", line 1588, in ainvoke
    async for chunk in self.astream(
  File "/usr/local/lib/python3.12/site-packages/langgraph/pregel/__init__.py", line 1477, in astream
    async for _ in runner.atick(
  File "/usr/local/lib/python3.12/site-packages/langgraph/pregel/runner.py", line 130, in atick
    await arun_with_retry(t, retry_policy, stream=self.use_astream)
  File "/usr/local/lib/python3.12/site-packages/langgraph/pregel/retry.py", line 99, in arun_with_retry
    async for _ in task.proc.astream(task.input, config):
  File "/usr/local/lib/python3.12/site-packages/langgraph/utils/runnable.py", line 576, in astream
    async for chunk in aiterator:
  File "/usr/local/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 1455, in atransform
    async for ichunk in input:
  File "/usr/local/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 1018, in astream
    yield await self.ainvoke(input, config, **kwargs)
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/site-packages/langgraph/utils/runnable.py", line 236, in ainvoke
    ret = await asyncio.create_task(coro, context=context)
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/site-packages/langchain_core/runnables/config.py", line 588, in run_in_executor
    return await asyncio.get_running_loop().run_in_executor(
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/concurrent/futures/thread.py", line 58, in run
    result = self.fn(*self.args, **self.kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/site-packages/langchain_core/runnables/config.py", line 579, in wrapper
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/site-packages/trustcall/_base.py", line 250, in create_extractor
    _Extract(
  File "/usr/local/lib/python3.12/site-packages/trustcall/_base.py", line 469, in __init__
    self.bound_llm = llm.bind_tools(
                     ^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/site-packages/langchain_aws/chat_models/bedrock_converse.py", line 546, in bind_tools
    return self.bind(tools=_format_tools(tools), **kwargs)
                           ^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/site-packages/langchain_aws/chat_models/bedrock_converse.py", line 922, in _format_tools
    spec = convert_to_openai_function(tool)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/site-packages/langchain_core/utils/function_calling.py", line 398, in convert_to_openai_function
    raise ValueError(msg)

ValueError: Unsupported function

{'type': 'function', 'function': { ... pydantic model definition ...}

Functions must be passed in as Dict, pydantic.BaseModel, or Callable. If they're a dict they must either be in OpenAI function format or valid JSON schema with top-level 'title' and 'description' keys.

Many thanks, really appreciate this lib :)

3coins commented 4 days ago

@arthberman I believe the problem is that trustcall sends the tool in the OpenAI function format (with type=function and a function key), not directly as the base model, which fails the validation in convert_to_openai_function. Here is the relevant trustcall code.

https://github.com/hinthornw/trustcall/blob/dc5e634beae7f4334dc50f715f0712c75c8cb7c7/trustcall/_base.py#L469-L482
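To make the shape mismatch concrete, here is a rough sketch (the UserInfo model is made up for illustration; this is not trustcall's actual code):

from pydantic import BaseModel

class UserInfo(BaseModel):
    """Information about the user."""
    name: str

# A shape convert_to_openai_function accepts: the pydantic model itself,
# or a plain function dict with top-level "name"/"description"/"parameters".
direct_tool = UserInfo

# The shape arriving from trustcall per the traceback: the OpenAI *tool*
# wrapper, with the function spec nested under a "function" key. In the
# versions from the traceback, convert_to_openai_function raises the
# ValueError shown above when handed this wrapper.
wrapped_tool = {
    "type": "function",
    "function": {
        "name": "UserInfo",
        "description": "Information about the user.",
        "parameters": UserInfo.model_json_schema(),
    },
}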

Here is a small change to _format_tools in ChatBedrockConverse that should fix the error. I still see some keys with '<UNKNOWN>' values in the response, so we might need to look at this more deeply.

# Imports mirror what bedrock_converse.py already pulls in.
from typing import Any, Callable, Dict, List, Literal, Sequence, Union

from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain_core.utils.pydantic import TypeBaseModel


def _format_tools(
    tools: Sequence[Union[Dict[str, Any], TypeBaseModel, Callable, BaseTool]],
) -> List[Dict[Literal["toolSpec"], Dict[str, Union[Dict[str, Any], str]]]]:
    formatted_tools: List = []
    for tool in tools:
        if isinstance(tool, dict) and "toolSpec" in tool:
            # Already in the Bedrock Converse toolSpec format; pass through.
            formatted_tools.append(tool)
        else:
            # New: unwrap tools passed in the OpenAI tool format
            # ({"type": "function", "function": {...}}), as trustcall sends them.
            if isinstance(tool, dict) and "function" in tool:
                tool = tool["function"]
            spec = convert_to_openai_function(tool)
            if not spec["description"]:
                spec["description"] = spec["name"]
            # The Converse API expects the JSON schema under inputSchema.json.
            spec["inputSchema"] = {"json": spec.pop("parameters")}
            formatted_tools.append({"toolSpec": spec})
    return formatted_tools
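As a quick sanity check, feeding the patched _format_tools the wrapped_tool sketched above now unwraps and converts it instead of raising:

print(_format_tools([wrapped_tool]))
# Expected shape, roughly:
# [{"toolSpec": {"name": "UserInfo",
#                "description": "Information about the user.",
#                "inputSchema": {"json": { ...UserInfo JSON schema... }}}}]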

Here is the output after the change.

{
  "pertinent_user_preferences": {
    "communication_preferences": {
      "telegram": {
        "preferred_encoding": [
          {
            "preference": "Morse code",
            "sentence_preference_revealed": "Customer prefers Morse code for encoding the telegram."
          }
        ],
        "favorite_telegram_operators": [
          {
            "preference": "<UNKNOWN>",
            "sentence_preference_revealed": "No specific favorite telegram operator mentioned."
          }
        ],
        "preferred_telegram_paper": [
          {
            "preference": "Daredevil",
            "sentence_preference_revealed": "Customer agrees to use 'Daredevil' paper for the telegram."
          }
        ]
      },
      "morse_code": {
        "preferred_key_type": [
          {
            "preference": "straight key",
            "sentence_preference_revealed": "Customer expresses love for using a straight key for Morse code."
          }
        ],
        "favorite_morse_abbreviations": [
          {
            "preference": "<UNKNOWN>",
            "sentence_preference_revealed": "No specific favorite Morse abbreviations mentioned."
          }
        ]
      },
      "semaphore": {
        "preferred_flag_color": [
          {
            "preference": "<UNKNOWN>",
            "sentence_preference_revealed": "No information provided about semaphore flag color preferences."
          }
        ],
        "semaphore_skill_level": [
          {
            "preference": "<UNKNOWN>",
            "sentence_preference_revealed": "No information provided about semaphore skill level."
          }
        ]
      }
    },
    "trust_fall_preferences": {
      "preferred_fall_height": [
        {
          "preference": "higher",
          "sentence_preference_revealed": "Customer expresses readiness for a higher fall in the trust fall exercise."
        }
      ],
      "trust_level": [
        {
          "preference": "high",
          "sentence_preference_revealed": "Customer shows a high level of trust by being ready for a higher fall."
        }
      ],
      "preferred_catching_technique": [
        {
          "preference": "diamond formation",
          "sentence_preference_revealed": "Customer prefers the diamond formation for catching during the trust fall."
        }
      ]
    }
  }
}