langchain-ai / langchain

🦜🔗 Build context-aware reasoning applications
https://python.langchain.com
MIT License

DOC: How do I make a custom LLM use the bind_tools feature? #26129

Open · nnnnwinder opened this issue 2 months ago

nnnnwinder commented 2 months ago

URL

https://python.langchain.com/v0.2/docs/how_to/tool_calling/

Issue with current documentation:

As we can see, our LLM generated arguments to a tool! You can look at the docs for bind_tools() to learn about all the ways to customize how your LLM selects tools, as well as this guide on how to force the LLM to call a tool rather than letting it decide.
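
(For context, the documented flow with a stock model looks roughly like this; multiply is a made-up example tool, and ChatOpenAI comes from the separate langchain-openai package.)

from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

llm_with_tools = ChatOpenAI(model="gpt-4o-mini").bind_tools([multiply])
msg = llm_with_tools.invoke("What is 3 * 12?")
print(msg.tool_calls)  # e.g. [{'name': 'multiply', 'args': {'a': 3, 'b': 12}, ...}]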

Idea or request for content:

How do I make a custom LLM use the bind_tools feature? I want to understand how to implement it.

nnnnwinder commented 2 months ago
import os
from typing import List, Optional, Any, Iterator, Dict, Sequence, Type, Callable, Union, Literal

import openai
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel, LanguageModelInput
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, AIMessageChunk, SystemMessage
from langchain_core.outputs import ChatResult, ChatGenerationChunk, ChatGeneration
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import BaseModel

class CustomOpenAiModel(BaseChatModel):
    # NOTE: pydantic requires type annotations on these fields; a bare
    # `model_name = "gpt-4o-mini"` would be rejected as a non-annotated attribute.
    model_name: str = "gpt-4o-mini"
    max_tokens: int = 4096
    temperature: float = 0.2
    openai_key: str = os.environ.get('openaiAPI', '')

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        openai.api_key = self.openai_key

        # Convert LangChain message objects into OpenAI chat-format dicts.
        messages_: List[Dict] = []

        for message in messages:
            if isinstance(message, HumanMessage):
                messages_.append(
                    {
                        "role": "user",
                        "content": message.content,
                    }
                )
            elif isinstance(message, AIMessage):
                messages_.append(
                    {
                        "role": "assistant",
                        "content": message.content,
                    }
                )
            elif isinstance(message, SystemMessage):
                messages_.append(
                    {
                        "role": "system",
                        "content": message.content,
                    }
                )
        response = openai.chat.completions.create(
            model=self.model_name,
            messages=messages_,
            max_tokens=self.max_tokens,
            n=1,
            temperature=self.temperature,
        )
        response_content = response.choices[0].message.content
        response_metadata = {
            "model_name": response.model,
            "token_usage": response.usage,
            "finish_reason": response.choices[0].finish_reason,
            "log_probs": response.choices[0].logprobs,
            "id": response.id
        }

        result_message = AIMessage(
            content=response_content,
            response_metadata=response_metadata
        )

        return ChatResult(generations=[ChatGeneration(message=result_message)])

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        openai.api_key = self.openai_key

        # Same LangChain-to-OpenAI message conversion as in _generate.
        messages_: List[Dict] = []
        for message in messages:
            if isinstance(message, HumanMessage):
                messages_.append(
                    {
                        "role": "user",
                        "content": message.content,
                    }
                )
            elif isinstance(message, AIMessage):
                messages_.append(
                    {
                        "role": "assistant",
                        "content": message.content,
                    }
                )
            elif isinstance(message, SystemMessage):
                messages_.append(
                    {
                        "role": "system",
                        "content": message.content,
                    }
                )

        response = openai.chat.completions.create(
            model=self.model_name,
            messages=messages_,
            max_tokens=self.max_tokens,
            n=1,
            stream=True,
            temperature=self.temperature,
        )

        for chunk in response:
            if chunk.choices[0].delta.content:
                yield ChatGenerationChunk(message=AIMessageChunk(content=chunk.choices[0].delta.content))

    @property
    def _llm_type(self) -> str:
        """Return a stable identifier for this chat model type (used only for logging)."""
        return "custom-openai-chat-model"

    def bind_tools(
        self,
        tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
        *,
        tool_choice: Optional[Union[dict, str, Literal["auto", "none"], bool]] = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tool-like objects to this chat model.

        Assumes model is compatible with OpenAI tool-calling API.

        Args:
            tools: A list of tool definitions to bind to this chat model.
                Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic
                models, callables, and BaseTools will be automatically converted to
                their schema dictionary representation.
            tool_choice: Which tool to require the model to call.
                Must be the name of the single provided function or
                "auto" to automatically determine which function to call
                (if any), or a dict of the form:
                {"type": "function", "function": {"name": <<tool_name>>}}.
            **kwargs: Any additional parameters to pass to the
                :class:`~langchain_core.runnables.Runnable` constructor.
        """

        formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
        if tool_choice:
            if len(formatted_tools) != 1:
                raise ValueError(
                    "When specifying `tool_choice`, you must provide exactly one "
                    f"tool. Received {len(formatted_tools)} tools."
                )
            if isinstance(tool_choice, str):
                if tool_choice not in ("auto", "none"):
                    tool_choice = {
                        "type": "function",
                        "function": {"name": tool_choice},
                    }
            elif isinstance(tool_choice, bool):
                tool_choice = formatted_tools[0]
            elif isinstance(tool_choice, dict):
                if (
                    formatted_tools[0]["function"]["name"]
                    != tool_choice["function"]["name"]
                ):
                    raise ValueError(
                        f"Tool choice {tool_choice} was specified, but the only "
                        f"provided tool was {formatted_tools[0]['function']['name']}."
                    )
            else:
                raise ValueError(
                    f"Unrecognized tool_choice type. Expected str, bool or dict. "
                    f"Received: {tool_choice}"
                )
            kwargs["tool_choice"] = tool_choice
        return super().bind(tools=formatted_tools, **kwargs)
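
For reproduction, a hypothetical call with the model above (multiply as defined in the earlier example): binding succeeds, but the bound schemas presumably never reach the API, so no tool call comes back.

llm_with_tools = CustomOpenAiModel().bind_tools([multiply])
msg = llm_with_tools.invoke("What is 3 * 12?")
print(msg.tool_calls)  # presumably [] -- _generate drops the bound kwargs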

I intend to customize the chat model, but after writing bind_tools and then calling it, the bound tools never take effect the way the documentation describes. How should I write the model so that bind_tools actually works?
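
In case it helps: after .bind_tools(...), the bound "tools" / "tool_choice" arrive in _generate via **kwargs, and the snippet above neither forwards them to the API call nor copies the returned tool calls into the AIMessage, so LangChain never sees a tool call. Below is a minimal, untested sketch of a _generate that does both, assuming the OpenAI v1 Python SDK and reusing the imports already present above (plus json).

import json

# Inside CustomOpenAiModel -- a drop-in replacement for the _generate above.
def _generate(
    self,
    messages: List[BaseMessage],
    stop: Optional[List[str]] = None,
    run_manager: Optional[CallbackManagerForLLMRun] = None,
    **kwargs: Any,
) -> ChatResult:
    openai.api_key = self.openai_key

    # Simplified conversion; assumes only these three message types occur.
    role_map = {HumanMessage: "user", AIMessage: "assistant", SystemMessage: "system"}
    messages_ = [{"role": role_map[type(m)], "content": m.content} for m in messages]

    # 1. Forward whatever bind()/bind_tools() attached ("tools",
    #    "tool_choice", ...) to the underlying chat-completions call.
    response = openai.chat.completions.create(
        model=self.model_name,
        messages=messages_,
        max_tokens=self.max_tokens,
        temperature=self.temperature,
        **kwargs,
    )

    choice = response.choices[0]
    # 2. Translate OpenAI tool calls into LangChain ToolCall dicts so that
    #    AIMessage.tool_calls is populated for downstream agents/parsers.
    tool_calls = [
        {
            "name": tc.function.name,
            "args": json.loads(tc.function.arguments),
            "id": tc.id,
            "type": "tool_call",
        }
        for tc in (choice.message.tool_calls or [])
    ]
    message = AIMessage(content=choice.message.content or "", tool_calls=tool_calls)
    return ChatResult(generations=[ChatGeneration(message=message)])

With that change, the invoke call in the repro above should come back with tool_calls populated, matching the behavior shown in the how-to guide.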