Closed etlevents closed 9 months ago
This doesn't seem to be an error caused by gptcache; it looks more like a problem with langchain's loading.
However, if I don't use embedding_func and data_manager, it works well the first time.
But the second time I run it, the following error occurs:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/test.ipynb 单元格 18 line 1
----> <a href='[vscode-notebook-cell://ssh-remote%2B192.168.1.23/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/test.ipynb#X22sdnNjb2RlLXJlbW90ZQ%3D%3D?line=0'>1</a>](vscode-notebook-cell://ssh-remote%2B192.168.1.23/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/test.ipynb#X22sdnNjb2RlLXJlbW90ZQ%3D%3D?line=0'%3E1%3C/a%3E) await chain.acall({"input": "你好"})
File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/base.py:349](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/base.py:349), in Chain.acall(self, inputs, return_only_outputs, callbacks, tags, metadata, include_run_info)
347 except (KeyboardInterrupt, Exception) as e:
348 await run_manager.on_chain_error(e)
--> 349 raise e
350 await run_manager.on_chain_end(outputs)
351 final_outputs: Dict[str, Any] = self.prep_outputs(
352 inputs, outputs, return_only_outputs
353 )
File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/base.py:343](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/base.py:343), in Chain.acall(self, inputs, return_only_outputs, callbacks, tags, metadata, include_run_info)
337 run_manager = await callback_manager.on_chain_start(
338 dumpd(self),
339 inputs,
340 )
341 try:
342 outputs = (
--> 343 await self._acall(inputs, run_manager=run_manager)
344 if new_arg_supported
345 else await self._acall(inputs)
346 )
347 except (KeyboardInterrupt, Exception) as e:
348 await run_manager.on_chain_error(e)
File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/llm.py:238](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/llm.py:238), in LLMChain._acall(self, inputs, run_manager)
233 async def _acall(
234 self,
235 inputs: Dict[str, Any],
236 run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
237 ) -> Dict[str, str]:
--> 238 response = await self.agenerate([inputs], run_manager=run_manager)
239 return self.create_outputs(response)[0]
File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/llm.py:116](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chains/llm.py:116), in LLMChain.agenerate(self, input_list, run_manager)
114 """Generate LLM result from inputs."""
115 prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager)
--> 116 return await self.llm.agenerate_prompt(
117 prompts,
118 stop,
119 callbacks=run_manager.get_child() if run_manager else None,
120 **self.llm_kwargs,
121 )
File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chat_models/base.py:425](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chat_models/base.py:425), in BaseChatModel.agenerate_prompt(self, prompts, stop, callbacks, **kwargs)
417 async def agenerate_prompt(
418 self,
419 prompts: List[PromptValue],
(...)
422 **kwargs: Any,
423 ) -> LLMResult:
424 prompt_messages = [p.to_messages() for p in prompts]
--> 425 return await self.agenerate(
426 prompt_messages, stop=stop, callbacks=callbacks, **kwargs
427 )
File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/adapter/langchain_models.py:225](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/adapter/langchain_models.py:225), in LangChainChat.agenerate(self, messages, stop, callbacks, **kwargs)
217 async def agenerate(
218 self,
219 messages: List[List[BaseMessage]],
(...)
222 **kwargs,
223 ) -> LLMResult:
224 self.tmp_args = kwargs
--> 225 return await super().agenerate(messages, stop=stop, callbacks=callbacks)
File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chat_models/base.py:385](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chat_models/base.py:385), in BaseChatModel.agenerate(self, messages, stop, callbacks, tags, metadata, **kwargs)
373 if run_managers:
374 await asyncio.gather(
375 *[
376 run_manager.on_llm_end(
(...)
383 ]
384 )
--> 385 raise exceptions[0]
386 flattened_outputs = [
387 LLMResult(generations=[res.generations], llm_output=res.llm_output)
388 for res in results
389 ]
390 llm_output = self._combine_llm_outputs([res.llm_output for res in results])
File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chat_models/base.py:486](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/langchain/chat_models/base.py:486), in BaseChatModel._agenerate_with_cache(self, messages, stop, run_manager, **kwargs)
482 raise ValueError(
483 "Asked to cache, but no cache found at `langchain.cache`."
484 )
485 if new_arg_supported:
--> 486 return await self._agenerate(
487 messages, stop=stop, run_manager=run_manager, **kwargs
488 )
489 else:
490 return await self._agenerate(messages, stop=stop, **kwargs)
File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/adapter/langchain_models.py:195](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/adapter/langchain_models.py:195), in LangChainChat._agenerate(self, messages, stop, run_manager)
183 async def _agenerate(
184 self,
185 messages: List[List[BaseMessage]],
186 stop: Optional[List[str]] = None,
187 run_manager: Optional[CallbackManagerForLLMRun] = None,
188 ) -> ChatResult:
189 session = (
190 self.session
191 if "session" not in self.tmp_args
192 else self.tmp_args.pop("session")
193 )
--> 195 return await aadapt(
196 self.chat._agenerate,
197 _cache_msg_data_convert,
198 _update_cache_msg_callback,
199 messages=messages,
200 stop=stop,
201 cache_obj=self.cache_obj,
202 session=session,
203 run_manager=run_manager,
204 **self.tmp_args,
205 )
File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/adapter/adapter.py:414](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/adapter/adapter.py:414), in aadapt(llm_handler, cache_data_convert, update_cache_callback, *args, **kwargs)
402 eval_query_data = {
403 "question": pre_store_data,
404 "embedding": embedding_data,
405 }
407 eval_cache_data = {
408 "question": cache_data.question,
409 "answer": cache_data.answers[0].answer,
(...)
412 "embedding": cache_data.embedding_data,
413 }
--> 414 rank = time_cal(
415 chat_cache.similarity_evaluation.evaluation,
416 func_name="evaluation",
417 report_func=chat_cache.report.evaluation,
418 )(
419 eval_query_data,
420 eval_cache_data,
421 extra_param=context.get("evaluation_func", None),
422 )
423 gptcache_log.debug(
424 "similarity: [user question] %s, [cache question] %s, [value] %f",
425 pre_store_data,
426 cache_data.question,
427 rank,
428 )
429 if rank_threshold <= rank:
File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/utils/time.py:9](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/utils/time.py:9), in time_cal.<locals>.inner(*args, **kwargs)
7 def inner(*args, **kwargs):
8 time_start = time.time()
----> 9 res = func(*args, **kwargs)
10 delta_time = time.time() - time_start
11 if cache.config.log_time_func:
File [~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/similarity_evaluation/distance.py:49](https://vscode-remote+ssh-002dremote-002b192-002e168-002e1-002e23.vscode-resource.vscode-cdn.net/home/admin227/sdb/admin227/projects/langchain-ChatGLM/server/chat/~/sdb/admin227/anaconda3/envs/pytorch2/lib/python3.9/site-packages/gptcache/similarity_evaluation/distance.py:49), in SearchDistanceEvaluation.evaluation(self, src_dict, cache_dict, **_)
38 def evaluation(
39 self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
40 ) -> float:
41 """Evaluate the similarity score of pair.
42 :param src_dict: the query dictionary to evaluate with cache.
43 :type src_dict: Dict
(...)
47 :return: evaluation score.
48 """
---> 49 distance, _ = cache_dict["search_result"]
50 if distance < 0:
51 distance = 0
ValueError: too many values to unpack (expected 2)
That is when the error occurred.
The bug has been fixed in version 0.1.42. Looking forward to your feedback.
Current Behavior
I'm using Langchain-ChatChat. This part uses the Langchain interface to call openai(streaming=True), but an error occurred.
Error
Expected Behavior
No response
Steps To Reproduce
No response
Environment
No response
Anything else?
No response