zilliztech / GPTCache

Semantic cache for LLMs. Fully integrated with LangChain and llama_index.
https://gptcache.readthedocs.io
MIT License
6.96k stars 490 forks source link

[Bug]: cannot pickle 'module' object #525

Closed etlevents closed 9 months ago

etlevents commented 11 months ago

Current Behavior

I'm using Langchain-Chatchat. This part uses the LangChain interface to call OpenAI (with streaming=True), but an error occurred:

import os
os.chdir("/home/admin227/sdb/admin227/projects/langchain-ChatGLM")
from gptcache.embedding import Huggingface
from gptcache.manager import get_data_manager, CacheBase, VectorBase
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from langchain.chat_models import ChatOpenAI
from gptcache.adapter.langchain_models import LangChainChat
from gptcache import cache
from langchain.prompts.chat import ChatPromptTemplate
# from server.chat.utils import History
from langchain import LLMChain

huggingface = Huggingface(model='/home/admin227/sdb/admin227/projects/embedding/m3e-base')
vector_base = VectorBase('faiss', dimension=huggingface.dimension)
data_manager = get_data_manager('sqlite', vector_base)
cache.init(
embedding_func=huggingface.to_embeddings,
data_manager=data_manager,
similarity_evaluation=SearchDistanceEvaluation(),
)

model = ChatOpenAI(
    # cache=True,
    streaming=True,
    verbose=True,
    openai_api_key=llm_model_dict[LLM_MODEL]["api_key"],
    openai_api_base=llm_model_dict[LLM_MODEL]["api_base_url"],
    model_name=LLM_MODEL
)

chat = LangChainChat(chat=model)#,callbacks=model.callbacks,streaming=True

chat_prompt = ChatPromptTemplate.from_messages(
    [i.to_msg_tuple() for i in []] + [("human", "{input}")])
chain = LLMChain(prompt=chat_prompt, llm=chat)

Error

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/load/serializable.py:74](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/load/serializable.py:74), in Serializable.__init__(self, **kwargs)
     73 def __init__(self, **kwargs: Any) -> None:
---> 74     super().__init__(**kwargs)
     75     self._lc_kwargs = kwargs

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/pydantic/main.py:339](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/pydantic/main.py:339), in pydantic.main.BaseModel.__init__()

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/pydantic/main.py:1066](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/pydantic/main.py:1066), in pydantic.main.validate_model()

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/pydantic/fields.py:439](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/pydantic/fields.py:439), in pydantic.fields.ModelField.get_default()

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/pydantic/utils.py:693](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/pydantic/utils.py:693), in pydantic.utils.smart_deepcopy()

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:172](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:172), in deepcopy(x, memo, _nil)
    170                 y = x
    171             else:
--> 172                 y = _reconstruct(x, memo, *rv)
    174 # If is its own copy, don't memoize.
    175 if y is not x:

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:270](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:270), in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)
    268 if state is not None:
    269     if deep:
--> 270         state = deepcopy(state, memo)
    271     if hasattr(y, '__setstate__'):
    272         y.__setstate__(state)

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:146](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:146), in deepcopy(x, memo, _nil)
    144 copier = _deepcopy_dispatch.get(cls)
    145 if copier is not None:
--> 146     y = copier(x, memo)
    147 else:
    148     if issubclass(cls, type):

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:230](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:230), in _deepcopy_dict(x, memo, deepcopy)
    228 memo[id(x)] = y
    229 for key, value in x.items():
--> 230     y[deepcopy(key, memo)] = deepcopy(value, memo)
    231 return y

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:172](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:172), in deepcopy(x, memo, _nil)
    170                 y = x
    171             else:
--> 172                 y = _reconstruct(x, memo, *rv)
    174 # If is its own copy, don't memoize.
    175 if y is not x:

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:270](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:270), in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)
    268 if state is not None:
    269     if deep:
--> 270         state = deepcopy(state, memo)
    271     if hasattr(y, '__setstate__'):
    272         y.__setstate__(state)

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:146](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:146), in deepcopy(x, memo, _nil)
    144 copier = _deepcopy_dispatch.get(cls)
    145 if copier is not None:
--> 146     y = copier(x, memo)
    147 else:
    148     if issubclass(cls, type):

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:230](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:230), in _deepcopy_dict(x, memo, deepcopy)
    228 memo[id(x)] = y
    229 for key, value in x.items():
--> 230     y[deepcopy(key, memo)] = deepcopy(value, memo)
    231 return y

    [... skipping similar frames: _deepcopy_dict at line 230 (3 times), _reconstruct at line 270 (3 times), deepcopy at line 172 (3 times), deepcopy at line 146 (3 times)]

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:172](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:172), in deepcopy(x, memo, _nil)
    170                 y = x
    171             else:
--> 172                 y = _reconstruct(x, memo, *rv)
    174 # If is its own copy, don't memoize.
    175 if y is not x:

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:270](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:270), in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)
    268 if state is not None:
    269     if deep:
--> 270         state = deepcopy(state, memo)
    271     if hasattr(y, '__setstate__'):
    272         y.__setstate__(state)

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:146](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:146), in deepcopy(x, memo, _nil)
    144 copier = _deepcopy_dispatch.get(cls)
    145 if copier is not None:
--> 146     y = copier(x, memo)
    147 else:
    148     if issubclass(cls, type):

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:230](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:230), in _deepcopy_dict(x, memo, deepcopy)
    228 memo[id(x)] = y
    229 for key, value in x.items():
--> 230     y[deepcopy(key, memo)] = deepcopy(value, memo)
    231 return y

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:161](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/copy.py:161), in deepcopy(x, memo, _nil)
    159 reductor = getattr(x, "__reduce_ex__", None)
    160 if reductor is not None:
--> 161     rv = reductor(4)
    162 else:
    163     reductor = getattr(x, "__reduce__", None)

TypeError: cannot pickle 'module' object

Expected Behavior

No response

Steps To Reproduce

No response

Environment

No response

Anything else?

No response

SimFG commented 11 months ago

This doesn't seem to be an error caused by gptcache, and it looks more like a problem with langchain loading image

etlevents commented 11 months ago

This doesn't seem to be an error caused by gptcache, and it looks more like a problem with langchain loading image

But if I don't use embedding_func and data_manager (see image),

the first time it works well (see image).

But the second time I run it

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/test.ipynb 单元格 18 line 1
----> <a href='[vscode-notebook-cell://ssh-remote%2B192.168.1.23/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/test.ipynb#X22sdnNjb2RlLXJlbW90ZQ%3D%3D?line=0'>1</a>](vscode-notebook-cell://ssh-remote%2B192.168.1.23/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/test.ipynb#X22sdnNjb2RlLXJlbW90ZQ%3D%3D?line=0'%3E1%3C/a%3E) await chain.acall({"input": "你好"})

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/base.py:349](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/base.py:349), in Chain.acall(self, inputs, return_only_outputs, callbacks, tags, metadata, include_run_info)
    347 except (KeyboardInterrupt, Exception) as e:
    348     await run_manager.on_chain_error(e)
--> 349     raise e
    350 await run_manager.on_chain_end(outputs)
    351 final_outputs: Dict[str, Any] = self.prep_outputs(
    352     inputs, outputs, return_only_outputs
    353 )

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/base.py:343](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/base.py:343), in Chain.acall(self, inputs, return_only_outputs, callbacks, tags, metadata, include_run_info)
    337 run_manager = await callback_manager.on_chain_start(
    338     dumpd(self),
    339     inputs,
    340 )
    341 try:
    342     outputs = (
--> 343         await self._acall(inputs, run_manager=run_manager)
    344         if new_arg_supported
    345         else await self._acall(inputs)
    346     )
    347 except (KeyboardInterrupt, Exception) as e:
    348     await run_manager.on_chain_error(e)

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/llm.py:238](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/llm.py:238), in LLMChain._acall(self, inputs, run_manager)
    233 async def _acall(
    234     self,
    235     inputs: Dict[str, Any],
    236     run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    237 ) -> Dict[str, str]:
--> 238     response = await self.agenerate([inputs], run_manager=run_manager)
    239     return self.create_outputs(response)[0]

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/llm.py:116](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/llm.py:116), in LLMChain.agenerate(self, input_list, run_manager)
    114 """Generate LLM result from inputs."""
    115 prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager)
--> 116 return await self.llm.agenerate_prompt(
    117     prompts,
    118     stop,
    119     callbacks=run_manager.get_child() if run_manager else None,
    120     **self.llm_kwargs,
    121 )

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chat_models/base.py:425](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chat_models/base.py:425), in BaseChatModel.agenerate_prompt(self, prompts, stop, callbacks, **kwargs)
    417 async def agenerate_prompt(
    418     self,
    419     prompts: List[PromptValue],
   (...)
    422     **kwargs: Any,
    423 ) -> LLMResult:
    424     prompt_messages = [p.to_messages() for p in prompts]
--> 425     return await self.agenerate(
    426         prompt_messages, stop=stop, callbacks=callbacks, **kwargs
    427     )

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/adapter/langchain_models.py:225](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/adapter/langchain_models.py:225), in LangChainChat.agenerate(self, messages, stop, callbacks, **kwargs)
    217 async def agenerate(
    218     self,
    219     messages: List[List[BaseMessage]],
   (...)
    222     **kwargs,
    223 ) -> LLMResult:
    224     self.tmp_args = kwargs
--> 225     return await super().agenerate(messages, stop=stop, callbacks=callbacks)

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chat_models/base.py:385](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chat_models/base.py:385), in BaseChatModel.agenerate(self, messages, stop, callbacks, tags, metadata, **kwargs)
    373     if run_managers:
    374         await asyncio.gather(
    375             *[
    376                 run_manager.on_llm_end(
   (...)
    383             ]
    384         )
--> 385     raise exceptions[0]
    386 flattened_outputs = [
    387     LLMResult(generations=[res.generations], llm_output=res.llm_output)
    388     for res in results
    389 ]
    390 llm_output = self._combine_llm_outputs([res.llm_output for res in results])

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chat_models/base.py:486](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chat_models/base.py:486), in BaseChatModel._agenerate_with_cache(self, messages, stop, run_manager, **kwargs)
    482     raise ValueError(
    483         "Asked to cache, but no cache found at `langchain.cache`."
    484     )
    485 if new_arg_supported:
--> 486     return await self._agenerate(
    487         messages, stop=stop, run_manager=run_manager, **kwargs
    488     )
    489 else:
    490     return await self._agenerate(messages, stop=stop, **kwargs)

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/adapter/langchain_models.py:195](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/adapter/langchain_models.py:195), in LangChainChat._agenerate(self, messages, stop, run_manager)
    183 async def _agenerate(
    184     self,
    185     messages: List[List[BaseMessage]],
    186     stop: Optional[List[str]] = None,
    187     run_manager: Optional[CallbackManagerForLLMRun] = None,
    188 ) -> ChatResult:
    189     session = (
    190         self.session
    191         if "session" not in self.tmp_args
    192         else self.tmp_args.pop("session")
    193     )
--> 195     return await aadapt(
    196         self.chat._agenerate,
    197         _cache_msg_data_convert,
    198         _update_cache_msg_callback,
    199         messages=messages,
    200         stop=stop,
    201         cache_obj=self.cache_obj,
    202         session=session,
    203         run_manager=run_manager,
    204         **self.tmp_args,
    205     )

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/adapter/adapter.py:414](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/adapter/adapter.py:414), in aadapt(llm_handler, cache_data_convert, update_cache_callback, *args, **kwargs)
    402     eval_query_data = {
    403         "question": pre_store_data,
    404         "embedding": embedding_data,
    405     }
    407     eval_cache_data = {
    408         "question": cache_data.question,
    409         "answer": cache_data.answers[0].answer,
   (...)
    412         "embedding": cache_data.embedding_data,
    413     }
--> 414 rank = time_cal(
    415     chat_cache.similarity_evaluation.evaluation,
    416     func_name="evaluation",
    417     report_func=chat_cache.report.evaluation,
    418 )(
    419     eval_query_data,
    420     eval_cache_data,
    421     extra_param=context.get("evaluation_func", None),
    422 )
    423 gptcache_log.debug(
    424     "similarity: [user question] %s, [cache question] %s, [value] %f",
    425     pre_store_data,
    426     cache_data.question,
    427     rank,
    428 )
    429 if rank_threshold <= rank:

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/utils/time.py:9](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/utils/time.py:9), in time_cal.<locals>.inner(*args, **kwargs)
      7 def inner(*args, **kwargs):
      8     time_start = time.time()
----> 9     res = func(*args, **kwargs)
     10     delta_time = time.time() - time_start
     11     if cache.config.log_time_func:

File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/similarity_evaluation/distance.py:49](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/similarity_evaluation/distance.py:49), in SearchDistanceEvaluation.evaluation(self, src_dict, cache_dict, **_)
     38 def evaluation(
     39     self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
     40 ) -> float:
     41     """Evaluate the similarity score of pair.
     42     :param src_dict: the query dictionary to evaluate with cache.
     43     :type src_dict: Dict
   (...)
     47     :return: evaluation score.
     48     """
---> 49     distance, _ = cache_dict["search_result"]
     50     if distance < 0:
     51         distance = 0

ValueError: too many values to unpack (expected 2)

the error above occurred.

SimFG commented 9 months ago

The bug has been fixed in 0.1.42. Looking forward to your feedback.