Atome-FE / llama-node

Believe in AI democratization. llama for nodejs backed by llama-rs, llama.cpp and rwkv.cpp, work locally on your laptop CPU. support llama/alpaca/gpt4all/vicuna/rwkv model.
https://llama-node.vercel.app/
Apache License 2.0
862 stars 62 forks source link

[Error: Failed to get embeddings] { code: 'GenericFailure' } #110

Open ThomasDev28 opened 1 year ago

ThomasDev28 commented 1 year ago

This code:

import { uuid } from "uuidv4";
import { LLamaEmbeddings } from "llama-node/dist/extensions/langchain.js";
import path from "path";
import { LLM } from "llama-node";
import { LLamaCpp } from "llama-node/dist/llm/llama-cpp.js";

const model = path.resolve(
  process.cwd(),
  "models/llama-2-7b-chat.ggmlv3.q4_0.bin"
);

const llama = new LLM(LLamaCpp);

const config = {
  modelPath: model,
  enableLogging: true,
  numPredict: 128,
  temperature: 0.2,
  topP: 1,
  topK: 40,
  repeatPenalty: 1,
  repeatLastN: 64,
  nCtx: 1024,
  seed: 0,
  f16Kv: false,
  logitsAll: false,
  vocabOnly: false,
  useMlock: false,
  useMmap: true,
  nGpuLayers: 0,
};

const run = async () => {
  await llama.load(config);
  const embeddings = new LLamaEmbeddings({ maxConcurrency: 1 } , llama )
  const documents = await processFiles("./documents");
  const documentsArr = documents.map((doc) => doc.pageContent);
  const embeddingsArr = await embeddings.embedDocuments(documentsArr);
}

run();

Output:

node:internal/process/esm_loader:91
    internalBinding('errors').triggerUncaughtException(
                              ^
[Error: Failed to get embeddings] { code: 'GenericFailure' }