langchain-ai / langchain-google


Gemini Model doesn't support tool_example_to_messages(examples) from Langchain documentation #490

Open pratham-darooka opened 1 week ago

pratham-darooka commented 1 week ago

I tried following the LangChain documentation for query decomposition, but it seems this pattern is not supported by Google Gemini models. How can I modify this code to make it work?

import uuid
from typing import Dict, List

from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.pydantic_v1 import BaseModel, Field


class SubQuery(BaseModel):
    sub_query: str = Field(
        ...,
        description="A very specific query for a search engine.",
    )

##########
# Adding examples to Prompt
QUERY_DECOMPOSITION_PROMPT_EXAMPLES = []
# Example 1
question = "Did company X increase its dividend and buy back stock?"
queries = [
    SubQuery(sub_query="Did company X increase its dividend?"),
    SubQuery(sub_query="Did company X buy back stock?"),
]
QUERY_DECOMPOSITION_PROMPT_EXAMPLES.append({"input": question, "tool_calls": queries})

# Example 2
question = "What is the revenue and profit of the company?"
queries = [
    SubQuery(sub_query="What is the revenue of the company?"),
    SubQuery(sub_query="What is the profit of the company?"),
]
QUERY_DECOMPOSITION_PROMPT_EXAMPLES.append({"input": question, "tool_calls": queries})

# Example 3
question = "Hey, what are the company's risks? How has its stock performed?"
queries = [
    SubQuery(sub_query="What are the company's risks?"),
    SubQuery(sub_query="How has the company's stock performed?"),
]
QUERY_DECOMPOSITION_PROMPT_EXAMPLES.append({"input": question, "tool_calls": queries})

# Example 4
question = "Hey, what can you do?"
queries = [
    SubQuery(sub_query="Hey"),
    SubQuery(sub_query="What can you do?"),
]
QUERY_DECOMPOSITION_PROMPT_EXAMPLES.append({"input": question, "tool_calls": queries})
##########

# this function triggers the error below with Gemini (copied from https://python.langchain.com/v0.1/docs/use_cases/query_analysis/techniques/decomposition/#adding-examples-and-tuning-the-prompt)
def tool_example_to_messages(example: Dict) -> List[BaseMessage]:
    messages: List[BaseMessage] = [HumanMessage(content=example["input"])]
    openai_tool_calls = []
    for tool_call in example["tool_calls"]:
        openai_tool_calls.append(
            {
                "id": str(uuid.uuid4()),
                "type": "function",
                "function": {
                    "name": tool_call.__class__.__name__,
                    "arguments": tool_call.json(),
                },
            }
        )
    messages.append(
        AIMessage(content="", additional_kwargs={"tool_calls": openai_tool_calls})
    )
    tool_outputs = example.get("tool_outputs") or [
        "\nThis is an example of a correct usage of this tool. Make sure to continue using the tool this way.\n"
    ] * len(openai_tool_calls)
    for output, tool_call in zip(tool_outputs, openai_tool_calls):
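        # note: no tool name is attached to these ToolMessages, which is what the Gemini error below points at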
        messages.append(ToolMessage(content=output, tool_call_id=tool_call["id"]))
    return messages

# @retry(stop=stop_after_attempt(6), wait=wait_fixed(10))
def _decompose_query(original_query: str, llm):
    QUERY_DECOMPOSITION_EXAMPLE_MESSAGES = [
        msg
        for ex in QUERY_DECOMPOSITION_PROMPT_EXAMPLES
        for msg in tool_example_to_messages(ex)
    ]
    QUERY_DECOMPOSITION_PROMPT = ChatPromptTemplate.from_messages(
        [
            ("system", QUERY_DECOMPOSITION_PROMPT_TEMPLATE),
            MessagesPlaceholder("examples", optional=True),
            ("human", "{question}"),
        ]
    )

    llm_with_tools = llm.bind_tools([SubQuery])
    parser = PydanticToolsParser(tools=[SubQuery])

    query_analyzer = (
        QUERY_DECOMPOSITION_PROMPT.partial(examples=QUERY_DECOMPOSITION_EXAMPLE_MESSAGES)
        | llm_with_tools
        | parser
    )  # this fails with the error below
    # query_analyzer = QUERY_DECOMPOSITION_PROMPT | llm_with_tools | parser  # this works (without examples)

    decomposed_queries = query_analyzer.invoke({"question" : original_query})
    logger.info(f"Decomposed query {original_query} to: {', '.join([x.sub_query for x in decomposed_queries])}")

    return [x.sub_query for x in decomposed_queries]
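
The function is invoked with a ChatGoogleGenerativeAI instance, roughly like this (the exact model name isn't in the original post; "gemini-pro" is a placeholder):

from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-pro")  # placeholder model name
sub_queries = _decompose_query("Did company X increase its dividend and buy back stock?", llm)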

Error seen in logs:

[llm/error] [chain:RunnableSequence > llm:ChatGoogleGenerativeAI] [1.56s] LLM run errored with error:
"ChatGoogleGenerativeAIError('Invalid argument provided to Gemini: 400
* GenerateContentRequest.contents[3].parts[0].function_response.name: Name cannot be empty.
* GenerateContentRequest.contents[7].parts[0].function_response.name: Name cannot be empty.
* GenerateContentRequest.contents[11].parts[0].function_response.name: Name cannot be empty.')

Thanks for your help!

lkuligin commented 5 hours ago

The error speaks for itself; you should adjust your spec.
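
Concretely, the 400 response flags GenerateContentRequest.contents[*].parts[0].function_response.name: the example ToolMessages reach Gemini without a tool name. One possible adjustment is to set the optional name field that ToolMessage inherits from BaseMessage when building the example messages. A minimal sketch of the changed loop in tool_example_to_messages (untested; it assumes langchain-google-genai forwards ToolMessage.name into function_response.name):

for output, tool_call in zip(tool_outputs, openai_tool_calls):
    messages.append(
        ToolMessage(
            content=output,
            tool_call_id=tool_call["id"],
            # assumption: a non-empty name here ends up as function_response.name
            name=tool_call["function"]["name"],
        )
    )

With that change, each function_response in the request should carry the SubQuery tool name instead of an empty string.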