1. Copy and paste the following script, which includes a system message:
// Load environment variables (e.g. the Gemini API key) before creating the client.
import * as dotenv from 'dotenv'
import { TokenJS } from './index'

dotenv.config()

// Instantiate the Token.js client.
const tokenjs = new TokenJS()

const main = async () => {
  // Request a chat completion from Gemini, sending a system message followed by
  // a user message.
  const completion = await tokenjs.chat.completions.create({
    provider: 'gemini',
    model: 'gemini-1.0-pro',
    messages: [
      { role: 'system', content: 'Test' },
      { role: 'user', content: 'Hello!' },
    ],
  })

  // Log the first choice returned by the model.
  console.log(completion.choices[0])
}

main()
2. Run the script
## Expected Outcome
Script should succeed and log a response from Gemini
## Actual Outcome
The following error occurs:
```bash
throw new GoogleGenerativeAIFetchError(`Error fetching from ${url.toString()}: [${response.status} ${response.statusText}] ${message}`, response.status, response.statusText, errorDetails);
^
GoogleGenerativeAIFetchError: [GoogleGenerativeAI Error]: Error fetching from https://generativelanguage.googleapis.com/v1beta/models/gemini-1.0-pro:generateContent: [400 Bad Request] Developer instruction is not enabled for models/gemini-1.0-pro
at handleResponseNotOk (file:///Users/samgoldman/dev/llm/node_modules/.pnpm/@google+generative-ai@0.14.1/node_modules/@google/generative-ai/dist/index.mjs:399:11)
at processTicksAndRejections (node:internal/process/task_queues:95:5)
at makeRequest (file:///Users/samgoldman/dev/llm/node_modules/.pnpm/@google+generative-ai@0.14.1/node_modules/@google/generative-ai/dist/index.mjs:372:9)
at generateContent (file:///Users/samgoldman/dev/llm/node_modules/.pnpm/@google+generative-ai@0.14.1/node_modules/@google/generative-ai/dist/index.mjs:801:22)
at GeminiHandler.create (/Users/samgoldman/dev/llm/src/handlers/gemini.ts:514:23)
at main (/Users/samgoldman/dev/llm/src/a.ts:12:22) {
status: 400,
statusText: 'Bad Request',
errorDetails: undefined
}
Node.js v18.17
```
## Steps to Reproduce
1. Copy and paste the following script, which includes a system message:

import { TokenJS } from './index'
dotenv.config()
// Create the Token.js client const tokenjs = new TokenJS()
const main = async () => { // Create a model response const completion = await tokenjs.chat.completions.create({ // Specify the provider and model provider: 'gemini', model: 'gemini-1.0-pro', // Define your message messages: [ { role: 'system', content: 'Test', }, { role: 'user', content: 'Hello!', }, ], }) console.log(completion.choices[0]) } main()