lmstudio-ai / lmstudio.js

LM Studio TypeScript SDK (pre-release public alpha)
https://lmstudio.ai/docs/lmstudio-sdk/quick-start
Apache License 2.0
271 stars 42 forks source link

embeddings are not generated using langchain js OpenAIEmbeddings #18

Closed geminigeek closed 1 month ago

geminigeek commented 1 month ago

Hi, I am following a tutorial; here is the GitHub link to it: https://github.com/leonvanzyl/langchain-js/blob/lesson-4/retrieval-chain.js. Everything works up to lesson 3. In lesson 4 I am using the code below to create embeddings, but there is no message or error — the program just hangs. Maybe I am doing something wrong!

Here is the code that is not working. The last line, where I log the result, is never reached:

console.log("vectorstore :>> ", vectorstore)
import { ChatOpenAI } from "@langchain/openai"
import { createStuffDocumentsChain } from "langchain/chains/combine_documents"
import { ChatPromptTemplate } from "@langchain/core/prompts"
import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio"
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
import { OpenAIEmbeddings } from "@langchain/openai"
import { MemoryVectorStore } from "langchain/vectorstores/memory"
import { createRetrievalChain } from "langchain/chains/retrieval"

// Chat model used for answer generation (served by LM Studio's
// OpenAI-compatible endpoint; the apiKey is ignored but required by the SDK).
const chatModelId =
  "lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF/Meta-Llama-3-8B-Instruct-Q4_K_M.gguf"

// BUG FIX: embeddings must come from a dedicated embedding model.
// Passing the chat GGUF model to OpenAIEmbeddings makes the
// /v1/embeddings request hang with no error — which is exactly the
// "program is just stuck" symptom. The embedding model must also be
// loaded in the LM Studio server UI before running this script.
const embeddingModelId = "nomic-ai/nomic-embed-text-v1.5-GGUF"

// Instantiate the chat model against the local LM Studio server.
const model = new ChatOpenAI({
  model: chatModelId,
  apiKey: "not-required",
  temperature: 0.7,
  configuration: {
    baseURL: "http://127.0.0.1:1234/v1",
  },
})

// Prompt template: answer strictly from the retrieved {context}.
const prompt = ChatPromptTemplate.fromTemplate(
  `Answer the user's question from the following context:
  {context}
  Question: {input}`,
)

// Combine-documents chain that stuffs all retrieved docs into {context}.
const chain = await createStuffDocumentsChain({ llm: model, prompt })

// Use Cheerio to scrape content from a webpage and create documents.
const loader = new CheerioWebBaseLoader(
  "https://js.langchain.com/docs/expression_language/",
)

const docs = await loader.load()

// Embedding client — NOTE: uses the embedding model, not the chat model.
const embeddings = new OpenAIEmbeddings({
  model: embeddingModelId,
  apiKey: "not-required",
  verbose: true,
  configuration: {
    baseURL: "http://127.0.0.1:1234/v1",
  },
})

// Create the in-memory vector store from the scraped documents.
const vectorstore = await MemoryVectorStore.fromDocuments(docs, embeddings)
console.log("vectorstore :>> ", vectorstore)

I found an issue that might be relevant to this: https://github.com/langchain-ai/langchain/issues/21318

geminigeek commented 1 month ago

It seems I was using the wrong models. I have updated my code with the correct models, but I still have the same issue!

import { ChatOpenAI } from "@langchain/openai"
import { createStuffDocumentsChain } from "langchain/chains/combine_documents"
import { ChatPromptTemplate } from "@langchain/core/prompts"
import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio"
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
import { OpenAIEmbeddings } from "@langchain/openai"
import { MemoryVectorStore } from "langchain/vectorstores/memory"
import { createRetrievalChain } from "langchain/chains/retrieval"

// Embedding model identifier. The earlier candidates
// ("microsoft/Phi-3-mini-4k-instruct-gguf", "nomic-ai/nomic-embed-text-v1.5-GGUF",
// "gaianet/bge-large-en-v1.5-GGUF/bge-large-en-v1.5-f16.gguf") were dead
// reassignments — only the final value was ever used, so keep just that one.
// IMPORTANT: this model must be loaded in the LM Studio server UI; if it is
// not loaded, the embeddings request hangs silently with no error message.
const modelToUse = "gaianet/bge-large-en-v1.5-GGUF"

// Prompt template: answer strictly from the retrieved {context}.
const prompt = ChatPromptTemplate.fromTemplate(
  `Answer the user's question from the following context:
  {context}
  Question: {input}`,
)

// Use Cheerio to scrape content from a webpage and create documents.
const loader = new CheerioWebBaseLoader(
  "https://js.langchain.com/docs/expression_language/",
)

const docs = await loader.load()

// Embedding client pointed at the local LM Studio OpenAI-compatible server.
const embeddings = new OpenAIEmbeddings({
  apiKey: "not-required",
  verbose: true,
  model: modelToUse,
  configuration: {
    baseURL: "http://127.0.0.1:1234/v1",
  },
})

console.log("embeddings :>> ", embeddings)

// Create the in-memory vector store from the scraped documents.
const vectorstore = await MemoryVectorStore.fromDocuments(docs, embeddings)
console.log("vectorstore :>> ", vectorstore)
geminigeek commented 1 month ago

I figured it out — I was not loading the embedding model in the LM Studio UI.