Drlordbasil / GroqAgenticWorkflow

Fast-paced, intelligent, teamwork-based agentic workflow.

add new llama embedding and RAG memory. #6

Closed Drlordbasil closed 6 months ago

Drlordbasil commented 6 months ago

The current class needs work:

import ollama
import chromadb

class LlamaRAG:
  """Minimal RAG: embed documents with Ollama, store and query them in Chroma."""

  def __init__(self):
    self.documents = [
      "Llamas are members of the camelid family meaning they're pretty closely related to vicuñas and camels",
      "Llamas were first domesticated and used as pack animals 4,000 to 5,000 years ago in the Peruvian highlands",
      "Llamas can grow as much as 6 feet tall though the average llama is between 5 feet 6 inches and 5 feet 9 inches tall",
      "Llamas weigh between 280 and 450 pounds and can carry 25 to 30 percent of their body weight",
      "Llamas are vegetarians and have very efficient digestive systems",
      "Llamas live to be about 20 years old, though some only live for 15 years and others live to be 30 years old",
    ]
    # In-memory Chroma client: the collection is rebuilt from scratch on every run.
    self.client = chromadb.Client()
    self.collection = self.client.create_collection(name="docs")

  def store_documents(self):
    # Embed each document with mxbai-embed-large and add it to the collection,
    # using the list index as the document id.
    for i, d in enumerate(self.documents):
      response = ollama.embeddings(model="mxbai-embed-large", prompt=d)
      embedding = response["embedding"]
      self.collection.add(
        ids=[str(i)],
        embeddings=[embedding],
        documents=[d]
      )

  def query_documents(self, prompt):
    # Embed the prompt with the same model used for the documents.
    response = ollama.embeddings(
      prompt=prompt,
      model="mxbai-embed-large"
    )
    # Retrieve the single closest document.
    results = self.collection.query(
      query_embeddings=[response["embedding"]],
      n_results=1
    )
    data = results['documents'][0][0]
    # Generate an answer grounded in the retrieved document.
    output = ollama.generate(
      model="stablelm2",
      prompt=f"Using this data: {data}. Respond to this prompt: {prompt}"
    )
    return output['response']

if __name__ == "__main__":
  rag = LlamaRAG()
  rag.store_documents()
  prompt = "What are some interesting facts about llamas?"
  response = rag.query_documents(prompt)
  print(response)
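
One direction for the rework, as a sketch: persist the index with chromadb.PersistentClient, use get_or_create_collection so repeated runs don't fail, and batch the adds into a single call. The constructor signature, class name, and storage path here are assumptions for illustration; the embedding model name is carried over from the snippet above.

import ollama
import chromadb

class PersistentLlamaRAG:
  """Sketch: LlamaRAG with on-disk storage and batched ingestion."""

  def __init__(self, documents, path="./rag_store"):  # hypothetical default path
    self.documents = documents
    # PersistentClient keeps embeddings on disk between runs.
    self.client = chromadb.PersistentClient(path=path)
    # get_or_create_collection is safe to call on every run.
    self.collection = self.client.get_or_create_collection(name="docs")

  def store_documents(self):
    # Skip re-embedding if the documents are already indexed.
    if self.collection.count() >= len(self.documents):
      return
    embeddings = [
      ollama.embeddings(model="mxbai-embed-large", prompt=d)["embedding"]
      for d in self.documents
    ]
    # One batched add instead of one call per document.
    self.collection.add(
      ids=[str(i) for i in range(len(self.documents))],
      embeddings=embeddings,
      documents=self.documents,
    )

query_documents would stay as above; the only behavioral change is that a second run reuses the stored embeddings instead of recomputing them.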