langchain-ai / langchainjs

🦜🔗 Build context-aware reasoning applications 🦜🔗
https://js.langchain.com/docs/
MIT License

Error in chat route: BadRequestError: 400 Invalid value for 'content': expected a string, got null. #4153

Closed: RedouanAssakali closed this issue 2 months ago

RedouanAssakali commented 5 months ago

Error in chat route: BadRequestError: 400 Invalid value for 'content': expected a string, got null.
    at APIError.generate (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/openai/error.js:44:20)
    at OpenAI.makeStatusError (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/openai/core.js:255:33)
    at OpenAI.makeRequest (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/openai/core.js:297:30)
    at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
    at async /Users/redouanassakali/Desktop/ahadai/backend/node_modules/@langchain/openai/dist/chat_models.cjs:650:29
    at async RetryOperation._fn (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/p-retry/index.js:50:12) {
  status: 400,

makechain.js:

const { OpenAI } = require('langchain/llms/openai');
const { PineconeStore } = require('langchain/vectorstores/pinecone');
const { ConversationalRetrievalQAChain } = require('langchain/chains');
const { BufferMemory } = require('langchain/memory'); // Import BufferMemory
const { FirestoreChatMessageHistory } = require('langchain/stores/message/firestore'); // Import FirestoreChatMessageHistory
const { PromptTemplate } = require("langchain/prompts");
const admin = require("firebase-admin");
const { ChatOpenAI } = require("@langchain/openai");
const path = require('path');
const dotenv = require('dotenv').config({ path: path.resolve(__dirname, '../.env') });

const CONDENSE_PROMPT = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.

Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:

`;

const QA_PROMPT = `You are {author}. Use the following pieces of context to answer the question at the end.

{context}

Question: {question}
Helpful answer ALWAYS output in markdown!!!

`;

const makeChain = (vectorstore, sessionId, uid, chatbot) => {

const firestoreConfig = {
    collectionName: "chathistory",
    sessionId: sessionId,
    userId: uid,
    config: { projectId: "unity-352c6" },
};

// Initialize the FirestoreChatMessageHistory for chat history
// Here a new sub collection for messages was made
const chatHistory = new FirestoreChatMessageHistory(firestoreConfig);

console.log("chatHistory", chatHistory)
// Create a BufferMemory instance with FirestoreChatMessageHistory
const memory = new BufferMemory({
    chatHistory,
    memoryKey: "chat_history",
    inputKey: "question",
    outputKey: "text",
});

console.log("memory", memory);

const model = new ChatOpenAI({
    temperature: 0.5,
    modelName: chatbot.activeModel,
    openAIApiKey: process.env.OPENAI_API_KEY

});

return ConversationalRetrievalQAChain.fromLLM(
    model,
    vectorstore.asRetriever(),
    {
        qaTemplate: QA_PROMPT.replace('{author}', chatbot.name),
        questionGeneratorTemplate: CONDENSE_PROMPT,
        returnSourceDocuments: true,
        memory,
    },
);

};

module.exports = { makeChain };

agent.js:

const { OpenAI } = require('langchain/llms/openai');
const { initializeAgentExecutorWithOptions } = require('langchain/agents');
const { PineconeStore } = require('langchain/vectorstores/pinecone');
const { OpenAIEmbeddings } = require('langchain/embeddings/openai');
const admin = require("firebase-admin");
const { Pinecone } = require('@pinecone-database/pinecone');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory'); // Import BufferMemory
const { FirestoreChatMessageHistory } = require('langchain/stores/message/firestore'); // Import FirestoreChatMessageHistory
const { PromptTemplate } = require("langchain/prompts");
const { ChainTool } = require("langchain/tools");
const { ConversationalRetrievalQAChain } = require('langchain/chains');
const { response } = require("express");
const { ChatOpenAI } = require("@langchain/openai");
const { makeChain } = require("./makechain");
const { MessagesPlaceholder } = require("@langchain/core/prompts");
const { OpenAIAgentTokenBufferMemory } = require("langchain/agents/toolkits");
const path = require('path');
const dotenv = require('dotenv').config({ path: path.resolve(__dirname, '../.env') });

const runChatbot = async (vectorStore, sessionId, uid, chatbotData) => {

const model = new ChatOpenAI({
    temperature: 0.5,
    modelName: chatbotData.activeModel,
    openAIApiKey: process.env.OPENAI_API_KEY

});

const firestoreConfig = {
    collectionName: "chathistory",
    sessionId: sessionId,
    userId: uid,
    config: {projectId: "unity-352c6"},
};

// Initialize the FirestoreChatMessageHistory for chat history
// Here a new sub collection for messages was made
const chatHistory = new FirestoreChatMessageHistory(firestoreConfig);

const memory = new OpenAIAgentTokenBufferMemory({
    llm: model,
    chatHistory: chatHistory,
    memoryKey: 'chat_history',
    outputKey: 'output',
    inputKey: 'input'
})
// Create a conversational chain
const retrievalChain = makeChain(vectorStore, sessionId, uid, chatbotData)

const qaTool = new ChainTool({
    name: `qa_tool`,
    description: `Books of the ${chatbotData.name} QA - useful for when you need to ask questions about the opinion of ${chatbotData.name}.`,
    chain: retrievalChain,
    returnDirect: true,
    verbose: true
})

const tools = [qaTool]

return await initializeAgentExecutorWithOptions(tools, model, {
    agentType: 'openai-functions',
    memory: memory,
    returnIntermediateSteps: true,
    agentArgs: {
        prefix:
            `You are ${chatbotData.name}  answering questions about ${chatbotData.name} you can use tools to read his books answer question with this information.`,
        memoryPrompts: [new MessagesPlaceholder("chat_history")],
    }
})

}

module.exports = {runChatbot};

// Chat API Route
router.post('/chat-agent', async (req, res) => {
    // Handles incoming chat requests
    const {question, history, sessionId, uid, chatbot} = req.body;

// Validate input
if (!sessionId || !uid) {
    return res.status(400).json({message: 'No session id in the request'});
}

// Check if a question is provided
if (!question) {
    return res.status(400).json({message: 'No question in the request'});
}

// Check if a chatbot is provided
if (!chatbot) {
    return res.status(400).json({message: 'No chatbot in the request'});
}

// Sanitize the question
const sanitizedQuestion = question.trim().replaceAll('\n', ' ');

//Retrieve the chatbot from firestore
const chatbotReference = await admin.firestore().collection('chatbots').doc(chatbot).get();
const chatbotData = chatbotReference.data();

history.forEach(msg => {
    console.log(`Type: ${msg.role}, Message: ${msg.content}`);
});

const transformedHistory = history.map(msg => {
    return msg.role === 'user'
        ? new CustomHumanMessage({ content: msg.content, name: "user", additional_kwargs: {} })
        : new CustomAIMessage({ content: msg.content, name: chatbotData.name, additional_kwargs: {} });
});
const jsonHistory = transformedHistory.map(msg => msg.toJSON());

console.log('JSON Transformed History:', jsonHistory);

console.log('Transformed History:', transformedHistory);

try {
    // Pinecone setup and initialization
    const pinecone = new Pinecone({
        apiKey: process.env.PINECONE_API_KEY,
        environment: process.env.PINECONE_ENVIRONMENT,
    });

    const index = pinecone.Index(process.env.PINECONE_INDEX_NAME);

    // Create a vector store
    const vectorStore = await PineconeStore.fromExistingIndex(
        new OpenAIEmbeddings({}),
        {
            pineconeIndex: index,
            textKey: 'text',
            namespace: "marting_lings",
        }
    );
    // Create a conversational chain
    const executor = await runChatbot(vectorStore, sessionId, uid, chatbotData);

    // Query the chain
    const response = await executor.run(
        {
            author: chatbot,
            input: sanitizedQuestion,
            chat_history: jsonHistory
        },
    )

    console.log(response)
    // Send the response back to the client
    res.status(200).json(response);
} catch (error) {
    console.error('Error in chat route:', error);
    res.status(500).json({error: error.message || 'Something went wrong'});
}

});

dosubot[bot] commented 5 months ago

🤖

Hey there, @RedouanAssakali! Fancy seeing you here again. Let's dive back into the code, shall we? 🚀

The error BadRequestError: 400 Invalid value for 'content': expected a string, got null is likely caused by a message in your chat history whose content field is null. The OpenAI API expects this field to be a string and throws this error when it is null.

You can see this in the stack trace you provided:

Error in chat route: BadRequestError: 400 Invalid value for 'content': expected a string, got null.
    at APIError.generate (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/openai/error.js:44:20)
    at OpenAI.makeStatusError (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/openai/core.js:255:33)
    at OpenAI.makeRequest (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/openai/core.js:297:30)
    at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
    at async /Users/redouanassakali/Desktop/ahadai/backend/node_modules/@langchain/openai/dist/chat_models.cjs:650:29
    at async RetryOperation._fn (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/p-retry/index.js:50:12) {
  status: 400,

The error is thrown when the OpenAI.makeRequest method is called, which is part of the @langchain/openai package. This method is likely called when you're trying to send a message to the OpenAI API.

To fix this error, you should ensure that all messages in your chat history have a content field that is a string. You can do this by adding a check before you send the chat history to the OpenAI API. Here's an example of how you can do this:

history.forEach(msg => {
  if (typeof msg.content !== 'string') {
    msg.content = String(msg.content);
  }
});

This code will ensure that the content field of all messages in your chat history is a string. If it's not, it will convert it to a string.
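One caveat: String(null) evaluates to the string "null", so the coercion above papers over a missing value rather than removing it. Also, because your agent replays history that the memory classes load from Firestore, a null content can enter the request even when req.body.history is clean. Here is a minimal diagnostic sketch, assuming the standard getMessages() method on FirestoreChatMessageHistory (chatHistory is the instance from your makechain.js; the variable names are illustrative):

// Hypothetical diagnostic: dump what the memory will actually replay
// and flag any stored message whose content is not a plain string.
const storedMessages = await chatHistory.getMessages();
storedMessages.forEach((msg, i) => {
  if (typeof msg.content !== 'string') {
    console.warn(`stored message[${i}] has non-string content:`, msg.content);
  }
});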

I hope this helps! If you have any other questions, feel free to ask.

This response is meant to be useful and save you time. It is not meant to be a precise solution, but rather a starting point for your own research.

Help me be more useful! Please leave a 👍 if this is helpful and 👎 if it is irrelevant.

If you want to continue the conversation, start your reply with @dosu-bot.

RedouanAssakali commented 5 months ago

@dosu-bot I did what you said but I still get the same error:

router.post('/chat-agent', async (req, res) => {
    // Handles incoming chat requests
    const {question, history, sessionId, uid, chatbot} = req.body;

// Validate input
if (!sessionId || !uid) {
    return res.status(400).json({message: 'No session id in the request'});
}

// Check if a question is provided
if (!question) {
    return res.status(400).json({message: 'No question in the request'});
}

// Check if a chatbot is provided
if (!chatbot) {
    return res.status(400).json({message: 'No chatbot in the request'});
}

// Sanitize the question
const sanitizedQuestion = question.trim().replaceAll('\n', ' ');

//Retrieve the chatbot from firestore
const chatbotReference = await admin.firestore().collection('chatbots').doc(chatbot).get();
const chatbotData = chatbotReference.data();

history.forEach(msg => {
    console.log(`Type: ${msg.role}, Message: ${msg.content}`);
});
history.forEach(msg => {
    if (typeof msg.content !== 'string') {
        msg.content = String(msg.content);
    }
});

const transformedHistory = history.map(msg => {
    return msg.role === 'user'
        ? new CustomHumanMessage({ content: msg.content, name: "user", additional_kwargs: {} })
        : new CustomAIMessage({ content: msg.content, name: chatbotData.name, additional_kwargs: {} });
});
const jsonHistory = transformedHistory.map(msg => msg.toJSON());

console.log('JSON Transformed History:', jsonHistory);

console.log('Transformed History:', transformedHistory);

try {
    // Pinecone setup and initialization
    const pinecone = new Pinecone({
        apiKey: process.env.PINECONE_API_KEY,
        environment: process.env.PINECONE_ENVIRONMENT,
    });

    const index = pinecone.Index(process.env.PINECONE_INDEX_NAME);

    // Create a vector store
    const vectorStore = await PineconeStore.fromExistingIndex(
        new OpenAIEmbeddings({}),
        {
            pineconeIndex: index,
            textKey: 'text',
            namespace: "marting_lings",
        }
    );
    // Create a conversational chain
    const executor = await runChatbot(vectorStore, sessionId, uid, chatbotData);

    // Query the chain
    const response = await executor.run(
        {
            author: chatbot,
            input: sanitizedQuestion,
            chat_history: jsonHistory
        },
    )

    console.log(response)
    // Send the response back to the client
    res.status(200).json(response);
} catch (error) {
    console.error('Error in chat route:', error);
    res.status(500).json({error: error.message || 'Something went wrong'});
}

});

Error in chat route: BadRequestError: 400 Invalid value for 'content': expected a string, got null.
    at APIError.generate (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/openai/error.js:44:20)
    at OpenAI.makeStatusError (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/openai/core.js:255:33)
    at OpenAI.makeRequest (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/openai/core.js:297:30)
    at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
    at async /Users/redouanassakali/Desktop/ahadai/backend/node_modules/@langchain/openai/dist/chat_models.cjs:650:29
    at async RetryOperation._fn (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/p-retry/index.js:50:12) {
  status: 400,
  headers: {
    'access-control-allow-origin': '*',
    'alt-svc': 'h3=":443"; ma=86400',
    'cf-cache-status': 'DYNAMIC',
    'cf-ray': '84b08811dbe9669a-AMS',
    connection: 'keep-alive',
    'content-length': '189',
    'content-type': 'application/json',
    date: 'Thu, 25 Jan 2024 12:31:09 GMT',
    'openai-organization': 'user-77baunwvgvena6selz0lma0e',
    'openai-processing-ms': '10',
    'openai-version': '2020-10-01',
    server: 'cloudflare',
    'set-cookie': '__cf_bm=2p6.Dr1dBpQ_d4d2eJpBVklaOIJbOdBWRHTn2fEeZ9w-1706185869-1-AZvrUhDsrLScutSNxem4svxONz5h4Od9GXIyIuU8YdvsiI52CVPOD9sNw+Gzm2HGZboXbS1ZrpTCntTzggeRbDA=; path=/; expires=Thu, 25-Jan-24 13:01:09 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=WRmnLpxpYUj9cgIivgo88IMaYaRLfuqf01.xy6H0OZ0-1706185869364-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None',
    'strict-transport-security': 'max-age=15724800; includeSubDomains',
    'x-ratelimit-limit-requests': '5000',
    'x-ratelimit-limit-tokens': '160000',
    'x-ratelimit-remaining-requests': '4999',
    'x-ratelimit-remaining-tokens': '159183',
    'x-ratelimit-reset-requests': '12ms',
    'x-ratelimit-reset-tokens': '306ms',
    'x-request-id': 'e1901cac0e6e2a88fa71cd4d4a8e29b1'
  },
  error: {
    message: "Invalid value for 'content': expected a string, got null.",
    type: 'invalid_request_error',
    param: 'messages.[23].content',
    code: null
  },
  code: null,
  param: 'messages.[23].content',
  type: 'invalid_request_error',
  attemptNumber: 1,
  retriesLeft: 6
}

dosubot[bot] commented 5 months ago

🤖

Based on the error message you're encountering, it seems that the 'content' field of one or more of your messages is null. This error is not necessarily related to the type of the 'content' field, but rather its value. Even if you have sanitized the 'content' of the messages to ensure they are strings, if a null value is passed, it will still cause this error.

The 'content' fields of all messages in the JSONL file are non-null strings, so the error must be occurring elsewhere in your application. It's possible that other messages are being processed that are not included in this file, or that the 'content' field is being set to null after the sanitization step.
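One plausible source worth ruling out (this is an assumption based on your setup, not something visible in the logs): with agentType: 'openai-functions', the assistant messages that carry a function call come back from the OpenAI API with content: null by design. If such a message is persisted to Firestore and later replayed verbatim, the request can fail exactly like this. A rough sketch of a guard when replaying history, assuming the standard getMessages() API (names are illustrative):

// Hypothetical guard: function-call assistant messages legitimately
// carry content: null, so normalize null to an empty string before
// the history is resent to the API.
const replayed = await chatHistory.getMessages();
for (const msg of replayed) {
  if (msg.content == null) {
    msg.content = '';
  }
}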

The 'messages' object is part of the OpenAIChatInput interface in the langchainjs codebase. However, the structure of the 'messages' object is not explicitly defined in the provided context. The 'messages' object is an array of CreateChatCompletionRequestMessage objects from the OpenAI client.

Here is the relevant code snippet:

export interface OpenAIChatInput extends OpenAIBaseInput {
  // ...
  /** ChatGPT messages to pass as a prefix to the prompt */
  prefixMessages?: OpenAIClient.Chat.CreateChatCompletionRequestMessage[];
}

Unfortunately, without the definition of OpenAIClient.Chat.CreateChatCompletionRequestMessage from the OpenAI client, I cannot provide the structure of the 'messages' object.

Without additional context, such as the relevant sections of your application code, it's not possible to provide a more specific solution. Could you please provide the relevant sections of your application code where the 'content' field is being processed? This would help in identifying the source of the null value.


RedouanAssakali commented 5 months ago

@dosu-bot here is my code:

makechain.js:

const { OpenAI } = require('langchain/llms/openai');
const { PineconeStore } = require('langchain/vectorstores/pinecone');
const { ConversationalRetrievalQAChain } = require('langchain/chains');
const { BufferMemory } = require('langchain/memory'); // Import BufferMemory
const { FirestoreChatMessageHistory } = require('langchain/stores/message/firestore'); // Import FirestoreChatMessageHistory
const { PromptTemplate } = require("langchain/prompts");
const admin = require("firebase-admin");
const { ChatOpenAI } = require("@langchain/openai");
const path = require('path');
const dotenv = require('dotenv').config({ path: path.resolve(__dirname, '../.env') });

const CONDENSE_PROMPT = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.

Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:

`;

const QA_PROMPT = `You are {author}. Use the following pieces of context to answer the question at the end.

{context}

Question: {question}
Helpful answer ALWAYS output in markdown!!!

`;

const makeChain = (vectorstore, sessionId, uid, chatbot) => {

const firestoreConfig = {
    collectionName: "chathistory",
    sessionId: sessionId,
    userId: uid,
    config: { projectId: "unity-352c6" },
};

// Initialize the FirestoreChatMessageHistory for chat history
// Here a new sub collection for messages was made
const chatHistory = new FirestoreChatMessageHistory(firestoreConfig);

console.log("chatHistory", chatHistory) // Create a BufferMemory instance with FirestoreChatMessageHistory const memory = new BufferMemory({ chatHistory, memoryKey: "chat_history", inputKey: "question", outputKey: "text", });

console.log("memory", memory);

const model = new ChatOpenAI({
    temperature: 0.5,
    modelName: chatbot.activeModel,
    openAIApiKey: process.env.OPENAI_API_KEY
});

return ConversationalRetrievalQAChain.fromLLM(
    model,
    vectorstore.asRetriever(),
    {
        qaTemplate: QA_PROMPT.replace('{author}', chatbot.name),
        questionGeneratorTemplate: CONDENSE_PROMPT,
        returnSourceDocuments: true,
        memory,
    },
);

};

module.exports = { makeChain };

agent.js:

const { OpenAI } = require('langchain/llms/openai');
const { initializeAgentExecutorWithOptions } = require('langchain/agents');
const { PineconeStore } = require('langchain/vectorstores/pinecone');
const { OpenAIEmbeddings } = require('langchain/embeddings/openai');
const admin = require("firebase-admin");
const { Pinecone } = require('@pinecone-database/pinecone');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory'); // Import BufferMemory
const { FirestoreChatMessageHistory } = require('langchain/stores/message/firestore'); // Import FirestoreChatMessageHistory
const { PromptTemplate } = require("langchain/prompts");
const { ChainTool } = require("langchain/tools");
const { ConversationalRetrievalQAChain } = require('langchain/chains');
const { response } = require("express");
const { ChatOpenAI } = require("@langchain/openai");
const { makeChain } = require("./makechain");
const { MessagesPlaceholder } = require("@langchain/core/prompts");
const { OpenAIAgentTokenBufferMemory } = require("langchain/agents/toolkits");
const path = require('path');
const dotenv = require('dotenv').config({ path: path.resolve(__dirname, '../.env') });

const runChatbot = async (vectorStore, sessionId, uid, chatbotData) => {

const model = new ChatOpenAI({
    temperature: 0.5,
    modelName: chatbotData.activeModel,
    openAIApiKey: process.env.OPENAI_API_KEY
});

const firestoreConfig = {
    collectionName: "chathistory",
    sessionId: sessionId,
    userId: uid,
    config: {projectId: "unity-352c6"},
};

// Initialize the FirestoreChatMessageHistory for chat history
// Here a new sub collection for messages was made
const chatHistory = new FirestoreChatMessageHistory(firestoreConfig);

const memory = new OpenAIAgentTokenBufferMemory({
    llm: model,
    chatHistory: chatHistory,
    memoryKey: 'chat_history',
    outputKey: 'output',
    inputKey: 'input'
})
// Create a conversational chain
const retrievalChain = makeChain(vectorStore, sessionId, uid, chatbotData)

const qaTool = new ChainTool({
    name: `qa_tool`,
    description: `Books of the ${chatbotData.name} QA - useful for when you need to ask questions about the opinion of ${chatbotData.name}.`,
    chain: retrievalChain,
    returnDirect: true,
    verbose: true
})

const tools = [qaTool]

return await initializeAgentExecutorWithOptions(tools, model, {
    agentType: 'openai-functions',
    memory: memory,
    returnIntermediateSteps: true,
    agentArgs: {
        prefix:
            `You are ${chatbotData.name} answering questions about ${chatbotData.name} you can use tools to read his books answer question with this information.`,
        memoryPrompts: [new MessagesPlaceholder("chat_history")],
    }
})

}

module.exports = {runChatbot};

// Chat API Route
router.post('/chat-agent', async (req, res) => {
    // Handles incoming chat requests
    const {question, history, sessionId, uid, chatbot} = req.body;

// Validate input
if (!sessionId || !uid) {
    return res.status(400).json({message: 'No session id in the request'});
}

// Check if a question is provided
if (!question) {
    return res.status(400).json({message: 'No question in the request'});
}

// Check if a chatbot is provided
if (!chatbot) {
    return res.status(400).json({message: 'No chatbot in the request'});
}

// Sanitize the question
const sanitizedQuestion = question.trim().replaceAll('\n', ' ');

//Retrieve the chatbot from firestore
const chatbotReference = await admin.firestore().collection('chatbots').doc(chatbot).get();
const chatbotData = chatbotReference.data();

history.forEach(msg => {
    console.log(`Type: ${msg.role}, Message: ${msg.content}`);
});

const transformedHistory = history.map(msg => {
    return msg.role === 'user'
        ? new CustomHumanMessage({ content: msg.content, name: "user", additional_kwargs: {} })
        : new CustomAIMessage({ content: msg.content, name: chatbotData.name, additional_kwargs: {} });
});
const jsonHistory = transformedHistory.map(msg => msg.toJSON());

console.log('JSON Transformed History:', jsonHistory);

console.log('Transformed History:', transformedHistory);

try {
    // Pinecone setup and initialization
    const pinecone = new Pinecone({
        apiKey: process.env.PINECONE_API_KEY,
        environment: process.env.PINECONE_ENVIRONMENT,
    });

    const index = pinecone.Index(process.env.PINECONE_INDEX_NAME);

    // Create a vector store
    const vectorStore = await PineconeStore.fromExistingIndex(
        new OpenAIEmbeddings({}),
        {
            pineconeIndex: index,
            textKey: 'text',
            namespace: "marting_lings",
        }
    );
    // Create a conversational chain
    const executor = await runChatbot(vectorStore, sessionId, uid, chatbotData);

    // Query the chain
    const response = await executor.run(
        {
            author: chatbot,
            input: sanitizedQuestion,
            chat_history: jsonHistory
        },
    )

    console.log(response)
    // Send the response back to the client
    res.status(200).json(response);
} catch (error) {
    console.error('Error in chat route:', error);
    res.status(500).json({error: error.message || 'Something went wrong'});
}

});

Same error:

Error in chat route: BadRequestError: 400 Invalid value for 'content': expected a string, got null.
    at APIError.generate (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/openai/error.js:44:20)
    at OpenAI.makeStatusError (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/openai/core.js:255:33)
    at OpenAI.makeRequest (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/openai/core.js:297:30)
    at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
    at async /Users/redouanassakali/Desktop/ahadai/backend/node_modules/@langchain/openai/dist/chat_models.cjs:650:29
    at async RetryOperation._fn (/Users/redouanassakali/Desktop/ahadai/backend/node_modules/p-retry/index.js:50:12) {
  status: 400,
  headers: {

dosubot[bot] commented 5 months ago

🤖

Based on the error message you're receiving, the 'content' field of one of your messages is null where a string is expected. The error's param value, messages.[23].content, indicates that it is the 24th message in the request payload whose 'content' is null.

The error could be occurring in two places in your code:

  1. When you're creating the examples for the GooglePaLMChatInput. If the 'content' field of either the input or output message is not a string, an error is thrown. However, if the 'content' field is null, it would not throw an error at this point, but it would fail when the request is made to the API.

  2. When you're mapping the BaseMessages to PalmMessages in the _mapBaseMessagesToPalmMessages method. If the 'content' field of a message is not a string, an error is thrown. Again, if the 'content' field is null, it would not throw an error at this point, but it would fail when the request is made to the API.

To resolve this issue, you should ensure that the 'content' field of all messages (both in the examples and in the messages you're sending to the API) is a string and not null. You can do this by adding a check before the request is made to the API.

Here's how you can do it:

In the _mapBaseMessagesToPalmMessages method, add a check for null 'content':

protected _mapBaseMessagesToPalmMessages(
    messages: BaseMessage[]
  ): protos.google.ai.generativelanguage.v1beta2.IMessage[] {
    // remove all 'system' messages
    const nonSystemMessages = messages.filter(
      (m) => getMessageAuthor(m) !== "system"
    );

    // requires alternate human & ai messages. Throw error if two messages are consecutive
    nonSystemMessages.forEach((msg, index) => {
      if (index < 1) return;
      if (
        getMessageAuthor(msg) === getMessageAuthor(nonSystemMessages[index - 1])
      ) {
        throw new Error(
          `Google PaLM requires alternate messages between authors`
        );
      }
    });

    return nonSystemMessages.map((m) => {
      if (typeof m.content !== "string" || m.content === null) {
        throw new Error(
          "ChatGooglePaLM does not support non-string or null message content."
        );
      }
      return {
        author: getMessageAuthor(m),
        content: m.content,
        citationMetadata: {
          citationSources: m.additional_kwargs.citationSources as
            | protos.google.ai.generativelanguage.v1beta2.ICitationSource[]
            | undefined,
        },
      };
    });
  }
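(As an aside: in JavaScript, typeof null is "object", so the typeof m.content !== "string" test already rejects null; the explicit m.content === null comparison is redundant, though harmless.)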

And in the constructor where you're creating the examples, add a check for null 'content':

this.examples =
      fields?.examples?.map((example) => {
        if (
          (isBaseMessage(example.input) &&
            (typeof example.input.content !== "string" || example.input.content === null)) ||
          (isBaseMessage(example.output) &&
            (typeof example.output.content !== "string" || example.output.content === null))
        ) {
          throw new Error(
            "GooglePaLM example messages may only have string content and cannot be null."
          );
        }
        return {
          input: {
            ...example.input,
            content: example.input?.content as string,
          },
          output: {
            ...example.output,
            content: example.output?.content as string,
          },
        };
      }) ?? this.examples;

This way, if the 'content' field of a message is null, an error will be thrown before the request is made to the API, and you can catch this error and handle it appropriately in your application.

You can find more information about this in the GooglePaLMChatInput class in the langchain-community library.

Regarding the structure of the req.body object in the chat API route, it contains an array of StoredMessage objects in the history field. Each StoredMessage object has a type field and a data field. The data field is an object of type StoredMessageData which contains the content field among others. The content field can contain a string or an array of objects. Each object in the array can be of two types:

  1. An object with a type field set to "text" and a text field containing a string.
  2. An object with a type field set to "image_url" and an image_url field containing either a string or an object with a url field and an optional detail field which can be "auto", "low"", or"high"`.

You can find more information about this in the StoredMessageData interface in the langchain-core library.
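As a rough illustration of that shape (the field names follow the description above; the values are invented):

// A StoredMessage with plain string content:
{
  type: "human",
  data: {
    content: "What does the author say about patience?",
    name: "user",
    additional_kwargs: {}
  }
}

// A StoredMessage whose content is an array of typed parts:
{
  type: "ai",
  data: {
    content: [
      { type: "text", text: "Here is the passage you asked about." },
      { type: "image_url", image_url: { url: "https://example.com/page.png", detail: "auto" } }
    ],
    additional_kwargs: {}
  }
}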

I hope this helps! If you have any other questions, feel free to ask.
