Current Behavior
I get a stack trace. The False/True lines below are the output of the two print(cache.has_init) calls inside init_gptcache:
False
True
Traceback (most recent call last):
  File "/home/theinhumaneme/Documents/NebuLogic/conversation-bot/development/scripts/chatbot-postgres-test.py", line 129, in <module>
    execution_time = timeit.timeit(lambda: llm.invoke("Tell me a joke"), number=1)
                     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/lib/python3.11/timeit.py", line 237, in timeit
    return Timer(stmt, setup, timer, globals).timeit(number)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/lib/python3.11/timeit.py", line 180, in timeit
    timing = self.inner(it, self.timer)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "<timeit-src>", line 6, in inner
  File "/home/theinhumaneme/Documents/NebuLogic/conversation-bot/development/scripts/chatbot-postgres-test.py", line 129, in <lambda>
    execution_time = timeit.timeit(lambda: llm.invoke("Tell me a joke"), number=1)
                                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/theinhumaneme/Documents/NebuLogic/conversation-bot/venv/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 153, in invoke
    self.generate_prompt(
  File "/home/theinhumaneme/Documents/NebuLogic/conversation-bot/venv/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 546, in generate_prompt
    return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/theinhumaneme/Documents/NebuLogic/conversation-bot/venv/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 407, in generate
    raise e
  File "/home/theinhumaneme/Documents/NebuLogic/conversation-bot/venv/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 397, in generate
    self._generate_with_cache(
  File "/home/theinhumaneme/Documents/NebuLogic/conversation-bot/venv/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 579, in _generate_with_cache
    cache_val = llm_cache.lookup(prompt, llm_string)
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/theinhumaneme/Documents/NebuLogic/conversation-bot/venv/lib/python3.11/site-packages/langchain_community/cache.py", line 813, in lookup
    res = get(prompt, cache_obj=_gptcache)
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/theinhumaneme/Documents/NebuLogic/conversation-bot/venv/lib/python3.11/site-packages/gptcache/adapter/api.py", line 124, in get
    res = adapt(
          ^^^^^^
  File "/home/theinhumaneme/Documents/NebuLogic/conversation-bot/venv/lib/python3.11/site-packages/gptcache/adapter/adapter.py", line 33, in adapt
    raise NotInitError()
gptcache.utils.error.NotInitError: The cache should be inited before using
Expected Behavior
I should be able to use the cache normally.
Steps To Reproduce
Latest gptcache, with the sample code below:
import hashlib
import os
import timeit

from gptcache import Cache, cache
from gptcache.adapter.api import init_similar_cache
from gptcache.manager import CacheBase, VectorBase, get_data_manager
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from langchain.globals import set_llm_cache
from langchain_community.cache import GPTCache, SQLiteCache
from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings

os.environ["OPENAI_API_KEY"] = ""
CONNECTION_STRING = "postgresql+psycopg2://postgres:postgres@localhost:5432/postgres"


def get_content_func(data, **_):
    # Use only the text after the last "Question" marker as the cache key input.
    return data.get("prompt").split("Question")[-1]


openai_embeddings = OpenAIEmbeddings(model="text-embedding-3-small")

cache_base = CacheBase(
    "postgresql",
    sql_url="postgresql+psycopg2://postgres:postgres@127.0.0.1:5432/postgres",
)
vector_base = VectorBase(
    "pgvector",
    host="127.0.0.1",
    port="5432",
    user="postgres",
    password="postgres",
    dimension=1536,
)
data_manager = get_data_manager(cache_base, vector_base)

# Initializing the global cache directly like this works fine:
# cache.init(
#     pre_embedding_func=get_content_func,
#     embedding_func=OpenAIEmbeddings(model="text-embedding-3-small").embed_query,
#     data_manager=data_manager,
#     similarity_evaluation=SearchDistanceEvaluation(),
# )


def init_gptcache(cache_obj: Cache, llm: str):
    print(cache.has_init)
    # This initializes the global `cache` singleton, not the `cache_obj`
    # that langchain passes in.
    cache.init(
        pre_embedding_func=get_content_func,
        embedding_func=OpenAIEmbeddings(model="text-embedding-3-small").embed_query,
        data_manager=data_manager,
        similarity_evaluation=SearchDistanceEvaluation(),
    )
    print(cache.has_init)


llm_model = "gpt-3.5-turbo-0125"
llm = ChatOpenAI(temperature=0, model_name=llm_model)
set_llm_cache(GPTCache(init_gptcache))

execution_time = timeit.timeit(lambda: llm.invoke("Tell me a joke"), number=1)
print(f"Execution time: {execution_time} seconds")
execution_time = timeit.timeit(lambda: llm.invoke("Tell me a joke"), number=1)
print(f"Execution time: {execution_time} seconds")
Environment
No response
Anything else?
I get this error when I use set_llm_cache() from langchain. It works fine when I init the global cache directly (cache.init), but the embedding setup also misbehaves: when I pass the OpenAI embeddings object as embedding_func I get an error stating that to_embeddings doesn't exist, and when I change the function to embed_query I get an unexpected extra_params keyword error.
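
For what it's worth, here is a minimal sketch (untested, assuming the data_manager and get_content_func from the sample above) of the variant I would expect to work: it inits the cache_obj that langchain passes in rather than the global cache, and wraps embed_query in a lambda so the extra keyword arguments that gptcache forwards to embedding_func (e.g. extra_param) are absorbed instead of raising "unexpected keyword argument":

# Sketch only: init the cache_obj langchain hands over, not the global cache.
from gptcache import Cache
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings(model="text-embedding-3-small")

def init_gptcache_per_llm(cache_obj: Cache, llm: str):  # hypothetical name
    if not cache_obj.has_init:
        cache_obj.init(
            pre_embedding_func=get_content_func,  # defined in the sample above
            # Wrap embed_query so gptcache's extra kwargs are swallowed.
            embedding_func=lambda data, **_: embeddings.embed_query(data),
            data_manager=data_manager,  # defined in the sample above
            similarity_evaluation=SearchDistanceEvaluation(),
        )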
Thank you :D