Believe in AI democratization. llama-node is llama for Node.js, backed by llama-rs, llama.cpp, and rwkv.cpp; it runs locally on your laptop CPU and supports llama/alpaca/gpt4all/vicuna/rwkv models.
import {RetrievalQAChain} from 'langchain/chains';
import {HNSWLib} from "langchain/vectorstores";
import {RecursiveCharacterTextSplitter} from 'langchain/text_splitter';
import {LLamaEmbeddings} from "llama-node/dist/extensions/langchain.js";
import {LLM} from "llama-node";
import {LLamaCpp} from "llama-node/dist/llm/llama-cpp.js";
import * as fs from 'fs';
import * as path from 'path';
const txtFilename = "TrainData";
const txtPath = `./${txtFilename}.txt`;
const VECTOR_STORE_PATH = `${txtFilename}.index`;
const model = path.resolve(process.cwd(), './h2ogptq-oasst1-512-30B.ggml.q5_1.bin');
const llama = new LLM(LLamaCpp);
const config = {
    path: model,
    enableLogging: true,
    nCtx: 1024,
    nParts: -1,
    seed: 0,
    f16Kv: false,
    logitsAll: false,
    vocabOnly: false,
    useMlock: false,
    embedding: true,
    useMmap: true,
};
let vectorStore;

const run = async () => {
    await llama.load(config);

    if (fs.existsSync(VECTOR_STORE_PATH)) {
        console.log('Vector Exists..');
        vectorStore = await HNSWLib.fromExistingIndex(VECTOR_STORE_PATH, new LLamaEmbeddings({maxConcurrency: 1}, llama));
    } else {
        console.log('Creating Documents');
        const text = fs.readFileSync(txtPath, 'utf8');
        const textSplitter = new RecursiveCharacterTextSplitter({chunkSize: 1000});
        const docs = await textSplitter.createDocuments([text]);
        console.log('Creating Vector');
        vectorStore = await HNSWLib.fromDocuments(docs, new LLamaEmbeddings({maxConcurrency: 1}, llama));
        await vectorStore.save(VECTOR_STORE_PATH);
    }

    console.log('Testing Vector via RetrievalQAChain');
    const chain = RetrievalQAChain.fromLLM(llama, vectorStore.asRetriever());
    const res = await chain.call({
        query: "what is a template",
    });
    console.log({res});
};
run();
At the line "const chain = RetrievalQAChain.fromLLM(llama, vectorStore.asRetriever());" it throws this error:
file:///root/project/node_modules/langchain/dist/chains/prompt_selector.js:34
    return llm._modelType() === "base_chat_model";
               ^
TypeError: llm._modelType is not a function
    at isChatModel (file:///root/project/node_modules/langchain/dist/chains/prompt_selector.js:34:16)
    at ConditionalPromptSelector.getPrompt (file:///root/project/node_modules/langchain/dist/chains/prompt_selector.js:23:17)
    at loadQAStuffChain (file:///root/project/node_modules/langchain/dist/chains/question_answering/load.js:20:41)
    at RetrievalQAChain.fromLLM (file:///root/project/node_modules/langchain/dist/chains/retrieval_qa.js:69:25)
    at run (file:///root/project/index.js:47:36)
How can we fix this issue?
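From the stack trace, my guess is that RetrievalQAChain.fromLLM expects a LangChain language model (something that implements _modelType()), while llama here is llama-node's own LLM instance, which isn't one. Below is a rough, untested sketch of the direction I'm considering: wrapping llama-node in a custom LangChain LLM by extending the LLM base class from langchain/llms/base. The createCompletion parameters and the response.token field are assumptions taken from the llama-node examples, so they may need adjusting.

// Hypothetical wrapper (sketch only, not verified): adapts llama-node's LLM to LangChain's LLM interface.
import { LLM as LangChainLLM } from "langchain/llms/base";

class LlamaNodeLLM extends LangChainLLM {
    constructor(llamaInstance, fields = {}) {
        super(fields);
        this.llama = llamaInstance; // the already-loaded llama-node LLM instance
    }

    _llmType() {
        // Arbitrary identifier reported to LangChain.
        return "llama-node";
    }

    async _call(prompt, _stop) {
        // Accumulate streamed tokens into one string; the parameter names follow
        // the llama-node createCompletion examples and are assumptions here.
        let output = "";
        await this.llama.createCompletion(
            { prompt, nThreads: 4, nTokPredict: 256, temp: 0.2, topK: 40, topP: 0.1, repeatPenalty: 1 },
            (response) => { output += response.token; }
        );
        return output;
    }
}

// The chain would then be built from the wrapper instead of the raw llama-node instance:
// const chain = RetrievalQAChain.fromLLM(new LlamaNodeLLM(llama), vectorStore.asRetriever());

Would something along these lines be the right fix, or is there a built-in adapter I am missing?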