vercel / ai

Build AI-powered applications with React, Svelte, Vue, and Solid
https://sdk.vercel.ai/docs
Other
10.13k stars 1.52k forks source link

experimental_onFunctionCall is not catching function_call completion. Function_call completion being sent back to client. #644

Closed jearthman closed 1 year ago

jearthman commented 1 year ago

Description

When sending an initial request to openAI API expecting a function call I get a correct response for a function call, example below:

{"function_call": {"name": "getDefinitions", "arguments": "{\n  \"word\": \"holiday\"\n}"}}

Rather than experimental_onFunctionCall block being run, onCompletion is run and the above response is sent to the client.

Code example

import OpenAI from "openai";
import { OpenAIStream, StreamingTextResponse } from "ai";
import { debugLog } from "../../../utils/server-helpers";
import { getChat, setChat } from "../../../redis/redis-server-helpers";
import { findUniqueChat } from "../../../pages/api/get-chat";
import { ChatCompletionMessageParam } from "openai/resources";
import { functions, runFunction } from "./functions";
import { createMessage } from "../../../pages/api/add-message";
import { getPromptTemplate } from "../../../utils/prompt-templates";
import { getChatMessages } from "./helpers";

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY!,
});

/**
 * Chat route handler: appends the user's message, streams a completion from
 * OpenAI (with function calling enabled), persists the final exchange to
 * Redis and Postgres, and returns the stream to the client.
 *
 * Expects a JSON body of `{ chatId, content, interactionType }`; responds
 * 400 when `chatId` is missing or when both `content` and `interactionType`
 * are absent.
 */
export async function POST(req: Request) {
  let { chatId, content, interactionType } = await req.json();

  if (!chatId) {
    return new Response("Missing chat id", { status: 400 });
  }

  if (!content && !interactionType) {
    return new Response("Missing content or interaction type", { status: 400 });
  }

  debugLog(
    `Request is good! chatId: ${chatId}, content: ${content}, interactionType: ${interactionType}`,
  );

  // An interactionType means the "user" content is generated from a prompt
  // template; otherwise the client sends content as a one-element array.
  if (interactionType) {
    content = await getPromptTemplate(interactionType, content);
  } else {
    content = content[0];
  }

  let messages: ChatCompletionMessageParam[] = await getChatMessages(chatId);

  if (!messages) {
    return new Response("Error getting chat messages", { status: 400 });
  }

  messages.push({
    role: "user",
    content: content,
  });

  const initialResponse = await openai.chat.completions.create({
    model: "gpt-4-0613",
    messages,
    stream: true,
    functions,
    function_call: "auto",
  });

  const stream = OpenAIStream(initialResponse, {
    // BUG FIX: this was `onCompletion`, which fires for EVERY completion in
    // the chain — including the intermediate `function_call` JSON payload —
    // so the raw function_call text was persisted and echoed to the client,
    // and each function-call round wrote duplicate user/assistant rows.
    // `onFinal` fires exactly once, after the last completion in the chain.
    onFinal: async (completion) => {
      // Write the updated transcript to Redis.
      messages.push({
        role: "assistant",
        content: completion,
      });
      setChat(chatId, messages);
      // Write both sides of the exchange to Postgres.
      await createMessage(
        parseInt(chatId, 10),
        "user",
        content,
        interactionType ? true : false,
      );
      await createMessage(
        parseInt(chatId, 10),
        "assistant",
        completion,
        interactionType && interactionType !== "greeting" ? true : false,
      );
    },
    experimental_onFunctionCall: async ({ name, arguments: args }) => {
      const result = await runFunction(name, args);
      messages.push({
        role: "function",
        name: name,
        content: result,
      });
      // BUG FIX: this call passed the raw `chatId` while every sibling call
      // uses `parseInt(chatId)` — keep the id type consistent.
      await createMessage(
        parseInt(chatId, 10),
        "function",
        result,
        interactionType ? true : false,
        name,
      );
      // Feed the function result back for a follow-up completion; returning
      // a new stream continues the chain handled by the SDK.
      return openai.chat.completions.create({
        model: "gpt-3.5-turbo-0613",
        stream: true,
        messages: messages,
        functions,
        function_call: "auto",
      });
    },
  });

  // Respond with the stream
  return new StreamingTextResponse(stream);
}
import { ChatCompletionCreateParams } from "openai/resources/chat/index";

// OpenAI function-calling schemas exposed to the model. Each entry's
// `parameters` is a JSON Schema describing the arguments the model must
// supply; names here must match the cases dispatched in `runFunction`.
export const functions: ChatCompletionCreateParams.Function[] = [
  {
    name: "getDefinitions",
    description:
      "Get a list definitions for a word using the Merriam-Webster Dictionary API when the user asks for the definition of a word",
    parameters: {
      type: "object",
      properties: {
        word: {
          type: "string",
          description:
            "The word that the user needs defined and get the definitions for",
        },
      },
      required: ["word"],
    },
  },
];

// Merriam-Webster Learner's Dictionary endpoint and API key, from env.
const dictionaryUrl = process.env.LEARNER_DICTIONARY_URL;
const dictionaryKey = process.env.LEARNER_DICTIONARY_KEY;

// Wolfram Alpha LLM API config — not used in this visible chunk.
// NOTE(review): `wolmramAlphaAppId` looks like a typo of `wolframAlphaAppId`;
// left as-is because references may exist outside this view.
const wolframAlphaLLMAPIUrl = process.env.WOLFRAM_ALPHA_LLM_API_URL;
const wolmramAlphaAppId = process.env.WOLFRAM_ALPHA_APP_ID;

// Trimmed-down shape of a single dictionary entry returned to the model.
type Definition = {
  headWord: string;
  functionalLabel: string;
  stems: string[];
  shortDefinitions: string[];
};

async function getDefinitions(word: string) {
  try {
    return fetch(`${dictionaryUrl}${word}?key=${dictionaryKey}`).then(
      async (res) => {
        try {
          const dictionaryRes = await res.json();

          if (typeof dictionaryRes[0] === "string") {
            return "No definitions found. Word is either a name or does not exist in the Merriam-Webster Dictionary.";
          }

          const definitions = dictionaryRes.map(
            (definitionJSON: any): Definition => {
              return {
                headWord: definitionJSON?.hwi?.hw,
                functionalLabel: definitionJSON?.fl,
                stems: definitionJSON?.stems,
                shortDefinitions: definitionJSON?.shortdef,
              };
            },
          );

          return JSON.stringify(definitions);
        } catch (error: any) {
          console.log(
            "There was an error parsing the Merriam-Webster Dictionary API response: ",
            error,
          );
          return error;
        }
      },
    );
  } catch (error: any) {
    console.log(
      "There was an error using the Merriam-Webster Dictionary API: ",
      error,
    );
    return error;
  }
}

/**
 * Dispatch an OpenAI function call by name to its implementation.
 *
 * BUG FIX: the switch had no default, so an unrecognized name resolved to
 * `undefined`, which was then pushed as a `function` message's content.
 * Unknown names now resolve to an explanatory string so the follow-up
 * completion always receives usable content.
 */
export async function runFunction(name: string, args: any) {
  switch (name) {
    case "getDefinitions":
      return await getDefinitions(args.word);
    default:
      return `Unknown function: ${name}`;
  }
}

Additional context

No response

jearthman commented 1 year ago

This seemed to have been caused by using onCompletion rather than onFinal with experimental_onFunctionCall. I noticed that my DB was getting two sets of user and assistant messages per function call run. experimental_onFunctionCall would work correctly a couple of times, but then it would start just returning the function_call JSON I pasted earlier. Maybe the data redundancy was causing strange behavior. function_calling_bug_data.csv

I started using onFinal which has corrected the message data and seems to have stopped the issue.