marclelamy opened this issue 1 month ago
@marclelamy can you post the details of the error that you are seeing (from the console/terminal)?
Oh my bad, I forgot to paste it.
Unhandled Rejection: u [AI_TypeValidationError]: Type validation failed: Value: {"id":"gen-lp658zQbL7YL6jCo6i3lEFrScqa6","model":"meta-llama/llama-3.1-8b-instruct","object":"chat.completion.chunk","created":1723594224,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null,"logprobs":{"tokens":null,"token_logprobs":null,"top_logprobs":null,"text_offset":null}}]}.
Error message: [
  {
    "code": "invalid_union",
    "unionErrors": [
      {
        "issues": [
          {
            "code": "invalid_type",
            "expected": "array",
            "received": "undefined",
            "path": [
              "choices",
              0,
              "logprobs",
              "content"
            ],
            "message": "Required"
          }
        ],
        "name": "ZodError"
      },
      {
        "issues": [
          {
            "code": "invalid_type",
            "expected": "object",
            "received": "undefined",
            "path": [
              "error"
            ],
            "message": "Required"
          }
        ],
        "name": "ZodError"
      }
    ],
    "path": [],
    "message": "Invalid input"
  }
]
    at s (/var/task/.next/server/app/page.js:191:7946)
    at o (/var/task/.next/server/app/page.js:191:8223)
    ... 5 lines matching cause stack trace ...
    at ensureIsPromise (node:internal/webstreams/util:192:19)
    at writableStreamDefaultControllerProcessWrite (node:internal/webstreams/writablestream:1109:5)
    at writableStreamDefaultControllerAdvanceQueueIfNeeded (node:internal/webstreams/writablestream:1224:5) {
  cause: t [ZodError]: [
    {
      "code": "invalid_union",
      "unionErrors": [
        {
          "issues": [
            {
              "code": "invalid_type",
              "expected": "array",
              "received": "undefined",
              "path": [
                "choices",
                0,
                "logprobs",
                "content"
              ],
              "message": "Required"
            }
          ],
          "name": "ZodError"
        },
        {
          "issues": [
            {
              "code": "invalid_type",
              "expected": "object",
              "received": "undefined",
              "path": [
                "error"
              ],
              "message": "Required"
            }
          ],
          "name": "ZodError"
        }
      ],
      "path": [],
      "message": "Invalid input"
    }
  ]
      at get error [as error] (/var/task/.next/server/app/page.js:204:83823)
      at Object.validate (/var/task/.next/server/app/page.js:191:7820)
      at s (/var/task/.next/server/app/page.js:191:7888)
      at o (/var/task/.next/server/app/page.js:191:8223)
      at Object.transform (/var/task/.next/server/app/page.js:191:11245)
      at ensureIsPromise (node:internal/webstreams/util:192:19)
      at transformStreamDefaultControllerPerformTransform (node:internal/webstreams/transformstream:505:18)
      at transformStreamDefaultSinkWriteAlgorithm (node:internal/webstreams/transformstream:555:10)
      at Object.write (node:internal/webstreams/transformstream:360:14)
      at ensureIsPromise (node:internal/webstreams/util:192:19) {
    issues: [ [Object] ],
    addIssue: [Function (anonymous)],
    addIssues: [Function (anonymous)],
    errors: [ [Object] ]
  },
  value: {
    id: 'gen-lp658zQbL7YL6jCo6i3lEFrScqa6',
    model: 'meta-llama/llama-3.1-8b-instruct',
    object: 'chat.completion.chunk',
    created: 1723594224,
    choices: [ [Object] ]
  }
}
Node.js process exited with exit status: 128. The logs above can help with debugging the issue.
@lgrammel
It seems that an error response chunk is being parsed incorrectly; this is provider-specific.
Which provider are you using? Can you add your provider initialization code (in particular, if you are using the OpenAI provider with a non-OpenAI service)?
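(For reference, the Zod output above shows a union with two branches: one expects choices[0].logprobs.content to be an array, the other expects a top-level error object. The OpenRouter chunk carries a legacy completions-style logprobs object instead, so both branches fail. A minimal sketch that reproduces the failure, inferred from the error output rather than taken from the SDK's actual schema:

import { z } from 'zod';

// Inferred from the error output above -- NOT the SDK's real schema.
// Branch 1: a content chunk whose logprobs, when present, must have a
// `content` array. Branch 2: an error payload with an `error` object.
const chunkSchema = z.union([
  z.object({
    choices: z.array(
      z.object({
        logprobs: z.object({ content: z.array(z.any()) }).nullish(),
      })
    ),
  }),
  z.object({ error: z.object({ message: z.string() }) }),
]);

// The llama chunk from the log: an OpenAI-completions-style logprobs
// object with no `content` array, so neither branch matches.
const chunk = {
  choices: [
    { logprobs: { tokens: null, token_logprobs: null, top_logprobs: null, text_offset: null } },
  ],
};

console.log(chunkSchema.safeParse(chunk).success); // false
)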
@lgrammel For non-OpenAI/Perplexity models I'm using OpenRouter.
Here's most of the code. The strange thing is that the error doesn't get caught by the try/catch. What would be a better way to catch/handle it?
'use server';

import { streamText } from 'ai';
import { createStreamableValue } from 'ai/rsc';
import { createOpenAI } from '@ai-sdk/openai';
import { FormattedRequestObject, User } from '@/lib/types';

// instantiating clients
const openaiClient = createOpenAI({
  apiKey: process.env.OPENAI_API_KEY || '',
});

const openrouterClient = createOpenAI({
  baseURL: 'https://openrouter.ai/api/v1',
  apiKey: process.env.OPENROUTER_API_KEY || '',
  headers: {
    'HTTP-Referer': 'tryblend.ai',
    'X-Title': 'blendai',
  },
});

const perplexityClient = createOpenAI({
  apiKey: process.env.PERPLEXITY_API_KEY || '',
  baseURL: 'https://api.perplexity.ai',
});

// specify type returned from openai
export async function generate(data: FormattedRequestObject): Promise<any> {
  // does some pre request stuff
  // (selectedModel and messages come out of the elided pre-request code)

  // Setting stream value
  const stream = createStreamableValue('');

  const processStream = async () => {
    // setting client based on model provider
    let client;
    if (selectedModel.provider === 'openai') {
      client = openaiClient;
    } else if (selectedModel.provider === 'openrouter') {
      client = openrouterClient;
    } else if (selectedModel.provider === 'perplexity') {
      client = perplexityClient;
    } else {
      return { error: 'Invalid model provider' };
    }

    // Run model
    const result = await streamText({
      model: client(selectedModel.providerModelId as string),
      messages: messages as any[],
      onFinish: async (final) => {
        // does some other stuff
      },
    });

    // process stream
    let firstToken: any = undefined;
    result.toAIStream({
      onStart: () => {
        console.log('on start');
      },
      onToken: (token) => {
        if (firstToken === undefined) {
          firstToken = token;
          console.log('on token');
        }
      },
    });

    // Send each chunk one by one to the client
    for await (const delta of result.textStream) {
      stream.update(delta);
    }

    // close stream
    stream.done();
  };

  // process stream
  try {
    processStream();
  } catch (error: any) {
    return { error: { message: error.message, status: error.status } };
  }

  return { output: stream.value };
}
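(One note on the try/catch question: processStream() is async and is invoked without await, so the surrounding try/catch can only catch errors thrown synchronously while the promise is created. A rejection inside the stream loop becomes an unhandled promise rejection instead, which is what crashes the function above. A sketch of one way to handle this, assuming the same ai/rsc APIs as in the code above; generateSafely and its shape are illustrative, not from the thread:

'use server';

import { streamText } from 'ai';
import { createStreamableValue } from 'ai/rsc';
import { createOpenAI } from '@ai-sdk/openai';

const openrouterClient = createOpenAI({
  baseURL: 'https://openrouter.ai/api/v1',
  apiKey: process.env.OPENROUTER_API_KEY || '',
});

// Illustrative only: the catch must live *inside* the un-awaited async
// function, because the outer function returns before the stream runs.
export async function generateSafely(prompt: string) {
  const stream = createStreamableValue('');

  const processStream = async () => {
    try {
      const result = await streamText({
        model: openrouterClient('meta-llama/llama-3.1-8b-instruct'),
        messages: [{ role: 'user', content: prompt }],
      });
      for await (const delta of result.textStream) {
        stream.update(delta);
      }
      stream.done();
    } catch (err: any) {
      // createStreamableValue exposes error() alongside update()/done();
      // this forwards the failure to the client instead of letting it
      // become an unhandled rejection that kills the serverless function.
      stream.error({ message: err?.message ?? 'stream failed' });
    }
  };

  processStream(); // intentionally not awaited: return the streamable now

  return { output: stream.value };
}
)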
Description
When running models like Llama or Mistral, I'm getting the type validation error, but it doesn't always happen; sometimes the completion runs just fine. In the code below, I see the onStart() log, the error happens right after it, and the first token never gets logged.
Code example
Additional context
No response