abhijeetsuryawanshi12 opened this issue 1 week ago
### Solution for Qdrant Integration

```python
import os
from mem0 import Memory

# Ensure the local Qdrant storage directory exists
os.makedirs("./qdrant_data", exist_ok=True)

config = {
    "vector_store": {
        "provider": "qdrant",
        "config": {
            "collection_name": "test_collection",
            "path": "./qdrant_data",  # Local persistent storage
            "host": "localhost",
            "port": 6333,
            "embedding_model_dims": 768,  # Verified dimension for nomic-embed-text
        },
    },
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3:latest",  # Updated to a more reliable model
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "embedder": {
        "provider": "ollama",
        "config": {
            "model": "nomic-embed-text:latest",
            "ollama_base_url": "http://localhost:11434",
        },
    },
}

m = Memory.from_config(config)

m.add("I'm visiting Paris", user_id="john", metadata={"type": "travel"})
m.add("I'm listening to music", user_id="john", metadata={"type": "activity"})

memories = m.get_all(user_id="john")
print(memories)
```
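If `get_all` returns what you expect, a semantic search against the same store is a natural next check (a minimal sketch reusing `m` from above; the query string is only illustrative):

```python
# Search john's memories semantically; the query text is just an example
results = m.search("Where am I traveling?", user_id="john")
print(results)
```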
### Solution for GraphRAG and Neo4j Integration
1. Corrected Configuration for GraphRAG with Ollama:
```python
import os
from mem0 import Memory

config = {
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3:latest",  # Updated model name
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "graph_store": {
        "provider": "neo4j",
        "config": {
            "url": "neo4j+s://your-neo4j-instance.databases.neo4j.io",
            "username": "neo4j",
            # IMPORTANT: use environment variables for sensitive credentials
            "password": os.getenv("NEO4J_PASSWORD"),
        },
    },
    "version": "v1.1",
}

# Initialize Memory
m = Memory.from_config(config)

# Add memory with user context
m.add("I like pizza", user_id="alice")

# Retrieve and search memories
memories = m.get_all(user_id="alice")
search_result = m.search("tell me my name.", user_id="alice")
```
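Since the password comes from the environment, it's worth failing fast if it isn't set (a minimal sketch; `NEO4J_PASSWORD` is the variable name assumed in the config above):

```python
import os

# Abort early with a clear message if the Neo4j credential is missing
if not os.getenv("NEO4J_PASSWORD"):
    raise RuntimeError("Set NEO4J_PASSWORD first, e.g. `export NEO4J_PASSWORD=...`")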
For Qdrant: make sure the Qdrant server is running and accessible at `localhost:6333`.

For GraphRAG and Neo4j: double-check the connection URL and credentials, and keep the password in an environment variable as shown above.

General troubleshooting:

1. Make sure Ollama is running (`ollama serve`) and the models you need are pulled (`ollama list`).
2. `ERROR:root:Error in new_memories_with_actions: 'event'`: add `"version": "v1.1"` to the config so the new output format is used.
3. "OpenAI API key error": remove OpenAI-specific settings (such as an empty `api_key`) so the Ollama provider is used end to end.
Let me know if any of that works. If there's any other data you can provide, I can probably help you troubleshoot further.
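PS: if you want a quick programmatic sanity check of both local services, something like this works (a sketch; the endpoints are Qdrant's `/collections` and Ollama's `/api/tags` listing APIs):

```python
import urllib.request

# Probe the two local services the configs above depend on
for name, url in [
    ("Qdrant", "http://localhost:6333/collections"),
    ("Ollama", "http://localhost:11434/api/tags"),
]:
    try:
        with urllib.request.urlopen(url, timeout=3) as resp:
            print(f"{name} reachable (HTTP {resp.status})")
    except Exception as exc:
        print(f"{name} NOT reachable at {url}: {exc}")
```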
```
ERROR:root:Error in new_memories_with_actions: 'event'
```

This error was not fixed with the given resolutions, @AbhigyaWangoo.
Full code:
```python
from mem0 import Memory

config = {
    "vector_store": {
        "provider": "qdrant",
        "config": {
            "collection_name": "memory_test",
            "host": "localhost",
            "path": "qdrant_storage",
            "port": 6333,
            "embedding_model_dims": 384,  # Change this according to your local model's dimensions
        },
    },
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3.1:8b",
            "temperature": 0.5,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",  # Ensure this URL is correct
        },
    },
    "embedder": {
        "provider": "huggingface",
        "config": {
            "model": "all-MiniLM-L6-v2",
        },
    },
}

m = Memory.from_config(config)

statements = [
    "I love hiking on weekends to enjoy nature.",
    "My favorite color is blue.",
    "I recently started learning how to play the guitar.",
    "Cooking is my way of relaxing after a long day.",
    "I have a black Labrador named Max.",
    "Reading historical fiction is my favorite pastime.",
    "I can’t start my day without a cup of coffee.",
    "I enjoy painting landscapes during my free time.",
    "One of my goals is to visit all seven continents.",
    "I’m a huge fan of mystery novels.",
    "I love experimenting with new recipes in the kitchen.",
    "Photography is a hobby I picked up during the pandemic.",
    "I’m currently training for a half-marathon.",
    "My favorite sport to watch is basketball.",
    "I enjoy building and flying model airplanes.",
    "I speak three languages fluently.",
    "Baking desserts for friends and family makes me happy.",
    "I have a small collection of vintage records.",
    "Gardening is a hobby that helps me unwind.",
    "I love binge-watching documentaries about space.",
]

for s in statements:
    m.add(s, user_id="test", metadata={"category": "hobbies"})
```
Errors:
```
ERROR:root:Error in new_memories_with_actions: 'event'
ERROR:root:Error in new_memories_with_actions: 'event'
/home/remotessh/memtest/test.py:72: DeprecationWarning: The current add API output format is deprecated. To use the latest format, set api_version='v1.1'. The current format will be removed in mem0ai 1.1.0 and later versions.
  m.add(s, user_id = "test", metadata={"category": "hobbies"})
ERROR:root:Error in new_memories_with_actions: 'event'
ERROR:root:Error in new_memories_with_actions: 'event'
```
It has added two memories but won't go any further.
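If it helps with debugging, you can check exactly which statements were persisted before the loop stalled (a minimal sketch reusing `m`, `statements`, and the `test` user from the code above; depending on the API version, `get_all` may return a list or a dict with a `results` key):

```python
stored = m.get_all(user_id="test")
# Normalize the two possible return shapes
items = stored.get("results", stored) if isinstance(stored, dict) else stored
print(f"{len(items)} of {len(statements)} statements were stored")
for item in items:
    print(item)
```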
Hi there! I understand you're having issues with Mem0 when using it with Ollama locally. Let me help you resolve these problems.
The issues you're experiencing can be broken down into two main problems: the `Error in new_memories_with_actions: 'event'` failure in the Qdrant/Ollama setup, and the OpenAI API key error in the GraphRAG configuration.
Here's how to fix these issues:
1. For the Qdrant configuration, add a local storage path and set the correct embedding dimensions:

```python
import os
from mem0 import Memory

os.makedirs("./qdrant_data", exist_ok=True)

config = {
    "vector_store": {
        "provider": "qdrant",
        "config": {
            "collection_name": "test_collection",
            "path": "./qdrant_data",  # Add local storage path
            "host": "localhost",
            "port": 6333,
            "embedding_model_dims": 768,  # Correct dimension for nomic-embed-text
        },
    },
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama2",  # Use a stable model
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "embedder": {
        "provider": "ollama",
        "config": {
            "model": "nomic-embed-text",
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "version": "v1.1",
}

m = Memory.from_config(config)

m.add("I'm visiting Paris", user_id="john", metadata={"type": "event", "category": "travel"})
```
2. For the GraphRAG configuration, remove OpenAI-specific settings:
```python
import os

config = {
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama2",
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "graph_store": {
        "provider": "neo4j",
        "config": {
            "url": "neo4j://localhost:7687",  # Use local Neo4j or your actual URL
            "username": "neo4j",
            "password": os.getenv("NEO4J_PASSWORD"),  # Use environment variable
        },
    },
    "version": "v1.1",
}
```
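For completeness, a minimal usage sketch with this config (mirroring the calls from your original sample; the search query is illustrative):

```python
from mem0 import Memory

m = Memory.from_config(config)
m.add("I like pizza", user_id="alice")
print(m.get_all(user_id="alice"))
print(m.search("What food do I like?", user_id="alice"))
```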
Key points to check:

1. Make sure Ollama is running: `ollama serve`
2. Verify model availability: `ollama list` (see the sketch after this list for a programmatic check)
3. Pull the required models: `ollama pull llama2` and `ollama pull nomic-embed-text`
4. Ensure Qdrant is running and accessible at `localhost:6333`

Common issues and solutions:

- `ERROR:root:Error in new_memories_with_actions: 'event'`: make sure `"version": "v1.1"` is set in the config.
- OpenAI API key errors: remove OpenAI-specific settings so the Ollama provider is used end to end.
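And a quick programmatic way to confirm the models are pulled (a sketch against Ollama's `/api/tags` endpoint, the same data `ollama list` shows; the required model names are assumptions based on the config above):

```python
import json
import urllib.request

# Fetch the list of locally available Ollama models
with urllib.request.urlopen("http://localhost:11434/api/tags", timeout=5) as resp:
    tags = json.load(resp)

available = {model["name"] for model in tags.get("models", [])}
for required in ("llama2:latest", "nomic-embed-text:latest"):
    status = "OK" if required in available else "MISSING (run `ollama pull`)"
    print(f"{required}: {status}")
```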
Let me know if you need any clarification or run into other issues!
Didn't work
### 🐛 Describe the bug
The first sample is for the Qdrant server and Ollama:
```python
from mem0 import Memory

# Configuration for Mem0 and Ollama
config = {
    "vector_store": {
        "provider": "qdrant",
        "config": {
            "collection_name": "test",
            "host": "localhost",
            "port": 6333,
            "embedding_model_dims": 768,  # Match this to your local model's embedding dimensions
        },
    },
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3.2:3b-instruct-q4_K_M",
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",  # Make sure Ollama is running at this URL
        },
    },
    "embedder": {
        "provider": "ollama",
        "config": {
            "model": "nomic-embed-text:latest",
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "version": "v1.1",
}

m = Memory.from_config(config)

# Add a few memories
m.add("I'm visiting Paris", user_id="john")
m.add("I'm listening to music", user_id="john")
m.add("I'm trying to learn French", user_id="john")
m.add("I'm going to the beach", user_id="john")
m.add("I'm going to the gym", user_id="john")

# Retrieve memories
memories = m.get_all(user_id="john")
print(memories)
```
The above code fails with:

```
ERROR:root:Error in new_memories_with_actions: 'event'
```
The second sample is for GraphRAG and Ollama:
```python
from mem0 import Memory

config = {
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3.2:3b-instruct-q4_K_M",
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",  # Make sure Ollama is running at this URL
            "api_key": "",
        },
    },
}

m = Memory.from_config(config_dict=config)

m.add("I like pizza", user_id="alice")
m.get_all(user_id="alice")
m.search("tell me my name.", user_id="alice")
```
The above sample fails with:

```
openai.OpenAIError: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable
```