langchain-ai / langchainjs

πŸ¦œπŸ”— Build context-aware reasoning applications πŸ¦œπŸ”—
https://js.langchain.com/docs/
MIT License
12.29k stars 2.08k forks source link

BadRequestError: 400 An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. The following tool_call_ids did not have response messages: call_hSmZB4G8cu3xYSWU6swBuOMo #6622

Open arkodeep3404 opened 2 weeks ago

arkodeep3404 commented 2 weeks ago

Checked other resources

Example Code

import dotenv from "dotenv";
dotenv.config();

import { tool } from "@langchain/core/tools";

import { DynamoDBChatMessageHistory } from "@langchain/community/stores/message/dynamodb";
import { ChatOpenAI } from "@langchain/openai";
import { RunnableWithMessageHistory } from "@langchain/core/runnables";
import {
  ChatPromptTemplate,
  MessagesPlaceholder,
} from "@langchain/core/prompts";
import { AgentExecutor, createOpenAIToolsAgent } from "langchain/agents";

// Chat prompt for the tools agent: system instructions, the prior
// conversation, the current user input, and the scratchpad placeholder
// where the agent runtime injects intermediate tool calls/results.
const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a helpful assistant. Answer all questions to the best of your ability."],
  new MessagesPlaceholder("chat_history"),
  ["human", "{input}"],
  new MessagesPlaceholder("agent_scratchpad"),
]);

// const prompt = `Answer the following questions as best you can. You have access to the following tools:

// {tools}

// Use the following format:

// Question: the input question you must answer
// Thought: you should always think about what to do
// Action: the action to take, should be one of [{tool_names}]
// Action Input: the input to the action
// Observation: the result of the action
// ... (this Thought/Action/Action Input/Observation can repeat N times)
// Thought: I now know the final answer
// Final Answer: the final answer to the original input question

// Begin!

// Question: {input}
// Thought:{agent_scratchpad}`;

// Placeholder tool the agent can invoke when the user asks for a picture.
// Takes no arguments and returns a stub string standing in for a real URL.
const imageTool = tool(
  async () => "image url",
  {
    name: "Get-Image-Tool",
    description: "Use this tool if the user asks you to send them an image/picture",
  }
);

// Chat model driving the agent. `model` and `apiKey` are the current
// ChatOpenAI option names; `modelName`/`openAIApiKey` are legacy aliases
// kept only for backwards compatibility.
// NOTE(review): process.env values are string | undefined — a missing
// OPENAI_API_KEY surfaces at request time, not construction time.
const llm = new ChatOpenAI({
  model: "gpt-4o-mini",
  apiKey: process.env.OPENAI_API_KEY,
});

// Register the tool set, build the OpenAI tools agent from the model and
// prompt, then wrap it in the executor that runs the tool-calling loop.
const tools = [imageTool];

const agent = await createOpenAIToolsAgent({ llm, tools, prompt });

const agentExecutor = new AgentExecutor({ agent, tools });

// Wrap the executor so each invocation loads/saves conversation turns in
// DynamoDB, keyed by sessionId.
// NOTE(review): for OpenAI, replayed history must keep each assistant
// message's tool_calls paired with matching tool messages; if the store
// drops or flattens tool messages, the next request fails with the 400
// reported in this issue — verify the @langchain/community DynamoDB store
// version in use serializes tool calls/results.
const agentWithChatHistory = new RunnableWithMessageHistory({
  runnable: agentExecutor,
  inputMessagesKey: "input", // key in the invoke() payload holding the new user message
  historyMessagesKey: "chat_history", // prompt placeholder filled with stored history
  getMessageHistory: async (sessionId) => {
    // One history per session; assumes all AWS_* env vars are set —
    // process.env values are string | undefined, so missing ones only
    // fail at runtime. TODO confirm table/partition key names match.
    return new DynamoDBChatMessageHistory({
      tableName: process.env.AWS_TABLE_NAME,
      partitionKey: process.env.AWS_TABLE_PARTITION_KEY,
      sessionId,
      config: {
        region: process.env.AWS_REGION,
        credentials: {
          accessKeyId: process.env.AWS_ACCESS_KEY_ID,
          secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
        },
      },
    });
  },
});

// const res1 = await chainWithHistory.invoke(
//   {
//     input: "Hi! I'm Arkodeep",
//   },
//   { configurable: { sessionId: "test" } }
// );
// console.log(res1);

/*
  "Hello MJDeligan! It's nice to meet you. My name is AI. How may I assist you today?"
  (sample output copied from the documentation example — the name does not
  match the "Hi! I'm Arkodeep" input above)
*/

// Second turn in the same "test" session: this input should make the model
// emit a tool call to Get-Image-Tool, which the executor runs before the
// final answer.
const res2 = await agentWithChatHistory.invoke(
  { input: "send me a pic" },
  { configurable: { sessionId: "test" } }
);
console.log(res2);

/*
  Expected: a reply built from the tool's stub "image url" return value.
  (The original "You said your name was MJDeligan." comment was copied from
  the docs example and does not correspond to this input.)
*/

Error Message and Stack Trace (if applicable)

arkodeepchatterjee@Arkodeeps-MacBook-Air chat % node index.js New LangChain packages are available that more efficiently handle tool calling.

Please upgrade your packages to versions that set message tool calls. e.g., yarn add @langchain/anthropic, yarn add @langchain/openai`, etc. node:internal/process/esm_loader:40 internalBinding('errors').triggerUncaughtException( ^

BadRequestError: 400 An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. The following tool_call_ids did not have response messages: call_hSmZB4G8cu3xYSWU6swBuOMo at APIError.generate (file:///Users/arkodeepchatterjee/Desktop/chat/node_modules/openai/error.mjs:41:20) at OpenAI.makeStatusError (file:///Users/arkodeepchatterjee/Desktop/chat/node_modules/openai/core.mjs:268:25) at OpenAI.makeRequest (file:///Users/arkodeepchatterjee/Desktop/chat/node_modules/openai/core.mjs:311:30) at process.processTicksAndRejections (node:internal/process/task_queues:95:5) at async file:///Users/arkodeepchatterjee/Desktop/chat/node_modules/@langchain/openai/dist/chat_models.js:1302:29 at async RetryOperation._fn (/Users/arkodeepchatterjee/Desktop/chat/node_modules/p-retry/index.js:50:12) { status: 400, headers: { 'access-control-expose-headers': 'X-Request-ID', 'alt-svc': 'h3=":443"; ma=86400', 'cf-cache-status': 'DYNAMIC', 'cf-ray': '8b806983a8654924-BOM', connection: 'keep-alive', 'content-length': '325', 'content-type': 'application/json', date: 'Sat, 24 Aug 2024 03:56:23 GMT', 'openai-organization': 'raheel-ioccbf', 'openai-processing-ms': '35', 'openai-version': '2020-10-01', server: 'cloudflare', 'set-cookie': '__cf_bm=2ASO9cTC1RGpoU5riValTxclo2WG6YDSKtGfyQOpN4g-1724471783-1.0.1.1-wkwwmkYmh8QqeqhwnilEfExTRwWl8gpMkktFUXZUmNC71JxmScjHgAsHP_GeYOLI454zeXwwyrBY.cAwIgTfEA; path=/; expires=Sat, 24-Aug-24 04:26:23 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=2AH1HjBX2Up6jHM3koaSozR2nJgLaHgTR5sctMnmKPE-1724471783415-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None', 'strict-transport-security': 'max-age=15552000; includeSubDomains; preload', 'x-content-type-options': 'nosniff', 'x-ratelimit-limit-requests': '5000', 'x-ratelimit-limit-tokens': '2000000', 'x-ratelimit-remaining-requests': '4999', 'x-ratelimit-remaining-tokens': '1999768', 'x-ratelimit-reset-requests': 
'12ms', 'x-ratelimit-reset-tokens': '6ms', 'x-request-id': 'req_8f7bf4f69c986415a30c4e99dfd0949d' }, request_id: 'req_8f7bf4f69c986415a30c4e99dfd0949d', error: { message: "An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. The following tool_call_ids did not have response messages: call_hSmZB4G8cu3xYSWU6swBuOMo", type: 'invalid_request_error', param: 'messages.[19].role', code: null }, code: null, param: 'messages.[19].role', type: 'invalid_request_error', attemptNumber: 1, retriesLeft: 6 }

Node.js v20.9.0 arkodeepchatterjee@Arkodeeps-MacBook-Air chat %

Description

The combination of tools + agent + AgentExecutor + RunnableWithMessageHistory + long-term DB-backed chat memory isn't working properly: the agent executor fails to execute the agent's tool function with the inputs received from the LLM.

System Info

{ "name": "chat", "version": "1.0.0", "main": "index.js", "type": "module", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "keywords": [], "author": "", "license": "ISC", "description": "", "dependencies": { "@aws-sdk/client-dynamodb": "^3.637.0", "@langchain/community": "^0.2.31", "@langchain/core": "^0.2.28", "@langchain/openai": "^0.2.7", "dotenv": "^16.4.5", "langchain": "^0.2.16", "zod": "^3.23.8" } }

dosubot[bot] commented 2 weeks ago

I found a similar issue that might help you resolve the "BadRequestError: 400 An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'" error. The issue is related to ensuring that tool messages are correctly formatted and follow the tool calls in the sequence [1].

Here’s a step-by-step guide to ensure proper setup:

  1. Ensure the Prompt Includes agent_scratchpad: The prompt must have an input variable named agent_scratchpad.

    const prompt = ChatPromptTemplate.fromMessages([
     ["system", "You are a helpful assistant. Answer all questions to the best of your ability."],
     new MessagesPlaceholder("chat_history"),
     ["human", "{input}"],
     new MessagesPlaceholder("agent_scratchpad"),
    ]);
  2. Bind Tools to the Model: If using a base chat model, ensure it has the bindTools method implemented.

    let modelWithTools;
    if (_isBaseChatModel(llm)) {
     if (llm.bindTools === undefined) {
       throw new Error(
         `This agent requires that the "bind_tools()" method be implemented on the input model.`
       );
     }
     modelWithTools = llm.bindTools(tools);
    } else {
     modelWithTools = llm;
    }
  3. Create the Agent with Proper Sequence: Use AgentRunnableSequence to ensure the correct sequence of operations, including formatting tool messages.

    const agent = AgentRunnableSequence.fromRunnables(
     [
       RunnablePassthrough.assign({
         agent_scratchpad: (input: { steps: ToolsAgentStep[] }) =>
           formatToToolMessages(input.steps),
       }),
       prompt,
       modelWithTools,
       new ToolCallingAgentOutputParser(),
     ],
     {
       name: "ToolCallingAgent",
       streamRunnable,
       singleAction: false,
     }
    );
  4. Execute the Agent: Use AgentExecutor to invoke the agent with the necessary input.

    const agentExecutor = new AgentExecutor({ agent, tools });
    const result = await agentExecutor.invoke({ input: "send me a pic" });

Additionally, ensure that the tool messages are correctly formatted and follow the tool calls in the sequence. The convertMessageToCohereMessage function in the ChatCohere integration can help format the messages correctly [2]:

/**
 * Translate a LangChain BaseMessage into Cohere's message format.
 * Supports system/human/ai/tool messages; any other type throws.
 *
 * @param message     the LangChain message to convert
 * @param toolResults tool results attached only to "tool" messages
 * @returns the equivalent Cohere.Message
 * @throws Error for unknown message types or non-string content
 */
function convertMessageToCohereMessage(
  message: BaseMessage,
  toolResults: ToolResult[]
): Cohere.Message {
  // Map a LangChain message type onto the Cohere role name.
  const roleFor = (role: MessageType) => {
    switch (role) {
      case "system":
        return "SYSTEM";
      case "human":
        return "USER";
      case "ai":
        return "CHATBOT";
      case "tool":
        return "TOOL";
      default:
        throw new Error(
          `Unknown message type: '${role}'. Accepted types: 'human', 'ai', 'system', 'tool'`
        );
    }
  };

  // Cohere only accepts plain-string content; reject structured content.
  const textOf = (content: MessageContent): string => {
    if (typeof content !== "string") {
      throw new Error(
        `ChatCohere does not support non text message content. Received: ${JSON.stringify(
          content,
          null,
          2
        )}`
      );
    }
    return content;
  };

  // Extract tool calls from an AI message; empty list otherwise.
  const toolCallsOf = (msg: BaseMessage): Cohere.ToolCall[] =>
    isAIMessage(msg) && msg.tool_calls
      ? msg.tool_calls.map((tc) => ({
          name: tc.name,
          parameters: tc.args,
        }))
      : [];

  switch (message._getType().toLowerCase()) {
    case "ai":
      return {
        role: roleFor(message._getType()),
        message: textOf(message.content),
        toolCalls: toolCallsOf(message),
      };
    case "tool":
      return {
        role: roleFor(message._getType()),
        message: textOf(message.content),
        toolResults,
      };
    case "human":
    case "system":
      return {
        role: roleFor(message._getType()),
        message: textOf(message.content),
      };
    default:
      throw new Error(
        "Got unknown message type. Supported types are AIMessage, ToolMessage, HumanMessage, and SystemMessage"
      );
  }
}

By following these steps and ensuring proper message formatting, you should be able to resolve the "BadRequestError: 400" error.

To continue talking to Dosu, mention @dosu.