Closed isnolan closed 4 months ago
Thanks for the feedback! None of the models released now (https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models#gemini-models) support multi-modal multi-turn chat. The SDK will support this as soon as the released model does.
Hi @isnolan
I have tested on SDK version 0.4.0
gemini-ultra-vision
// Reproduction script: multi-turn chat seeded with text-only history, then a
// streaming message that mixes a text prompt with an inline image part.
const {VertexAI} = require('@google-cloud/vertexai');

const project = 'your project';
const location = 'us-central1';

// Tiny base64-encoded image payload sent alongside the text prompt.
const IMAGE_BASE64 =
  'iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8z8BQz0AEYBxVSF+FABJADveWkH6oAAAAAElFTkSuQmCC';

async function test() {
  const client = new VertexAI({project, location});

  // GA namespace (SDK >= 0.4.0); the preview namespace works the same way:
  //   const model = client.preview.getGenerativeModel({...})
  const model = client.getGenerativeModel({model: 'gemini-ultra-vision'});

  // Two text-only turns of history before the multimodal message.
  const chat = model.startChat({
    history: [
      {role: 'user', parts: [{text: 'I like blue and green'}]},
      {role: 'model', parts: [{text: 'Please help me see if it is the color I like'}]},
    ],
  });

  try {
    const streamingResult = await chat.sendMessageStream([
      {text: 'Is this my favorite color?'},
      {
        inline_data: {
          data: IMAGE_BASE64,
          mime_type: 'image/jpeg',
        },
      },
    ]);
    // NOTE: the result object serializes as empty ({} stream/response) —
    // the actual chunks arrive through the async iterator below.
    console.log('[vertex]Multi chat:', JSON.stringify(streamingResult, null, 2));
    for await (const chunk of streamingResult.stream) {
      console.log(`->`, chunk.candidates[0].content.parts[0].text);
    }
  } catch (err) {
    console.warn(err);
  }
}

test();
the above code outputs:
[vertex]Multi chat: {
"stream": {},
"response": {}
}
-> Yes, this is your favorite color.
gemini-1.0-pro-vision
I used the same code above except changing the model name to gemini-1.0-pro-vision.
Got a 500
error code at the time of this comment. Specifically
GoogleGenerativeAIError: [VertexAI.GoogleGenerativeAIError]: got status: 500 Internal Server Error. {"error":{"code":500,"message":"Internal error encountered.","status":"INTERNAL"}}
at throwErrorIfNotOK (/content/node_modules/@google-cloud/vertexai/build/src/functions/post_fetch_processing.js:33:15)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async generateContentStream (/content/node_modules/@google-cloud/vertexai/build/src/functions/generate_content.js:90:5) {
stack_trace: undefined
}
/content/node_modules/@google-cloud/vertexai/build/src/models/chat_session.js:126
throw new errors_1.GoogleGenerativeAIError('exception appending chat history', e);
^
GoogleGenerativeAIError: [VertexAI.GoogleGenerativeAIError]: exception appending chat history
at /content/node_modules/@google-cloud/vertexai/build/src/models/chat_session.js:126:19
at process.processTicksAndRejections (node:internal/process/task_queues:95:5) {
stack_trace: GoogleGenerativeAIError: [VertexAI.GoogleGenerativeAIError]: got status: 500 Internal Server Error. {"error":{"code":500,"message":"Internal error encountered.","status":"INTERNAL"}}
at throwErrorIfNotOK (/content/node_modules/@google-cloud/vertexai/build/src/functions/post_fetch_processing.js:33:15)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async generateContentStream (/content/node_modules/@google-cloud/vertexai/build/src/functions/generate_content.js:90:5) {
stack_trace: undefined
}
}
Node.js v18.19.1
So what I read from the error message is:
GoogleGenerativeAIError: [VertexAI.GoogleGenerativeAIError]: got status: 500 Internal Server Error. {"error":{"code":500,"message":"Internal error encountered.","status":"INTERNAL"}}
gemini-1.0-pro-vision
gemini-ultra-vision
is now available in the GA allowlist, OR gemini-1.0-pro-vision
to fix the internal error. For the latest list of available models, please visit https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models#gemini-models
[Error]
gemini-pro-vision
reports exceptions in chat sessions with history [Code]
[Error]
[Other]
gemini-pro
model.