Open · aditya624 opened this issue 9 months ago
Current Behavior

If I invoke a chain built with the LangChain Expression Language (LCEL) through the GPTCache LangChainChat wrapper, it fails with:

TypeError: create() got an unexpected keyword argument 'tags'
from operator import itemgetter

from langchain_community.chat_models import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from gptcache.adapter.langchain_models import LangChainChat

# get_schema, get_dialect, get_similar_data, memory_zep, prompt,
# prompt_reponse and db are defined elsewhere in my application
# (schema helpers, prompts, Zep memory and the SQL database).

llm = ChatOpenAI(temperature=0)
llm_cache = LangChainChat(chat=llm)  # GPTCache wrapper around the chat model

sql_response = (
    RunnablePassthrough.assign(
        table_info=get_schema,
        dialect=get_dialect,
        few_shot_examples=lambda x: get_similar_data(x["input"]),
        history=RunnableLambda(memory_zep.load_memory_variables)
        | itemgetter("history"),
    )
    | prompt
    | llm.bind(stop=["\nSQLResult:"])
    | StrOutputParser()
)

full_chain = (
    RunnablePassthrough.assign(query=sql_response).assign(
        table_info=get_schema,
        response=lambda x: db.run(x["query"]),
    )
    | prompt_reponse
    | llm_cache
)

full_chain.invoke({"input": "..."})
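The failure does not seem to depend on the rest of the chain. A minimal sketch that only wraps the model in the adapter should surface the same error, assuming a GPTCache cache is initialised for chat messages and OPENAI_API_KEY is set (I have not reduced my setup this far, so treat it as untested):

from gptcache import cache
from gptcache.processor.pre import get_messages_last_content
from gptcache.adapter.langchain_models import LangChainChat
from langchain_community.chat_models import ChatOpenAI

# Usual GPTCache init for chat models: embed on the last message's content.
cache.init(pre_embedding_func=get_messages_last_content)

llm_cache = LangChainChat(chat=ChatOpenAI(temperature=0))

# Runnable.invoke routes through BaseChatModel.invoke, which always forwards
# tags/metadata/run_name from the config as extra kwargs, so this one call
# should be enough to trigger the TypeError.
llm_cache.invoke("Hello")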
Expected Behavior

Chains built with LCEL should run through the GPTCache LangChainChat wrapper without raising this error.
Steps To Reproduce

No response
Environment

langchain              0.1.5
langchain-community    0.0.19
langchain-core         0.1.21
langchain-experimental 0.0.50
langchain-openai       0.0.5
langchainhub           0.1.14
gptcache               0.1.43
openai                 1.11.1
Anything else?

Full traceback:

TypeError                                 Traceback (most recent call last)
Cell In[49], line 2
      1 # response = full_chain.invoke(inputs, config={'callbacks': [ConsoleCallbackHandler()]})
----> 2 response = full_chain.invoke(inputs, config={'callbacks': [ConsoleCallbackHandler()]})

File /opt/conda/envs/agent/lib/python3.9/site-packages/langchain_core/runnables/base.py:2053, in RunnableSequence.invoke(self, input, config)
   2051 try:
   2052     for i, step in enumerate(self.steps):
-> 2053         input = step.invoke(
   2054             input,
   2055             # mark each step as a child run
   2056             patch_config(
   2057                 config, callbacks=run_manager.get_child(f"seq:step:{i+1}")
   2058             ),
   2059         )
   2060     # finish the root run
   2061 except BaseException as e:

File /opt/conda/envs/agent/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py:166, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
    155 def invoke(
    156     self,
    157     input: LanguageModelInput,
   (...)
    161     **kwargs: Any,
    162 ) -> BaseMessage:
    163     config = ensure_config(config)
    164     return cast(
    165         ChatGeneration,
--> 166         self.generate_prompt(
    167             [self._convert_input(input)],
    168             stop=stop,
    169             callbacks=config.get("callbacks"),
    170             tags=config.get("tags"),
    171             metadata=config.get("metadata"),
    172             run_name=config.get("run_name"),
    173             **kwargs,
    174         ).generations[0][0],
    175     ).message

File /opt/conda/envs/agent/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py:544, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
    536 def generate_prompt(
    537     self,
    538     prompts: List[PromptValue],
   (...)
    541     **kwargs: Any,
    542 ) -> LLMResult:
    543     prompt_messages = [p.to_messages() for p in prompts]
--> 544     return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

File /opt/conda/envs/agent/lib/python3.9/site-packages/gptcache/adapter/langchain_models.py:213, in LangChainChat.generate(self, messages, stop, callbacks, **kwargs)
    205 def generate(
    206     self,
    207     messages: List[List[BaseMessage]],
   (...)
    210     **kwargs,
    211 ) -> LLMResult:
    212     self.tmp_args = kwargs
--> 213     return super().generate(messages, stop=stop, callbacks=callbacks)

File /opt/conda/envs/agent/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py:408, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, **kwargs)
    406     if run_managers:
    407         run_managers[i].on_llm_error(e, response=LLMResult(generations=[]))
--> 408     raise e
    409 flattened_outputs = [
    410     LLMResult(generations=[res.generations], llm_output=res.llm_output)
    411     for res in results
    412 ]
    413 llm_output = self._combine_llm_outputs([res.llm_output for res in results])

File /opt/conda/envs/agent/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py:398, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, **kwargs)
    395 for i, m in enumerate(messages):
    396     try:
    397         results.append(
--> 398             self._generate_with_cache(
    399                 m,
    400                 stop=stop,
    401                 run_manager=run_managers[i] if run_managers else None,
    402                 **kwargs,
    403             )
    404         )
    405     except BaseException as e:
    406         if run_managers:

File /opt/conda/envs/agent/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py:577, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
    573     raise ValueError(
    574         "Asked to cache, but no cache found at `langchain.cache`."
    575     )
    576 if new_arg_supported:
--> 577     return self._generate(
    578         messages, stop=stop, run_manager=run_manager, **kwargs
    579     )
    580 else:
    581     return self._generate(messages, stop=stop, **kwargs)

File /opt/conda/envs/agent/lib/python3.9/site-packages/gptcache/adapter/langchain_models.py:169, in LangChainChat._generate(self, messages, stop, run_manager)
    163 session = (
    164     self.session
    165     if "session" not in self.tmp_args
    166     else self.tmp_args.pop("session")
    167 )
    168 cache_obj = self.tmp_args.pop("cache_obj", cache)
--> 169 return adapt(
    170     self.chat._generate,
    171     _cache_msg_data_convert,
    172     _update_cache_msg_callback,
    173     messages=messages,
    174     stop=stop,
    175     cache_obj=cache_obj,
    176     session=session,
    177     run_manager=run_manager,
    178     **self.tmp_args,
    179 )

File /opt/conda/envs/agent/lib/python3.9/site-packages/gptcache/adapter/adapter.py:241, in adapt(llm_handler, cache_data_convert, update_cache_callback, *args, **kwargs)
    238 if search_only_flag:
    239     # cache miss
    240     return None
--> 241 llm_data = time_cal(
    242     llm_handler, func_name="llm_request", report_func=chat_cache.report.llm
    243 )(*args, **kwargs)
    245 if not llm_data:
    246     return None

File /opt/conda/envs/agent/lib/python3.9/site-packages/gptcache/utils/time.py:9, in time_cal.<locals>.inner(*args, **kwargs)
      7 def inner(*args, **kwargs):
      8     time_start = time.time()
----> 9     res = func(*args, **kwargs)
     10     delta_time = time.time() - time_start
     11     if cache.config.log_time_func:

File /opt/conda/envs/agent/lib/python3.9/site-packages/langchain_community/chat_models/openai.py:435, in ChatOpenAI._generate(self, messages, stop, run_manager, stream, **kwargs)
    429 message_dicts, params = self._create_message_dicts(messages, stop)
    430 params = {
    431     **params,
    432     **({"stream": stream} if stream is not None else {}),
    433     **kwargs,
    434 }
--> 435 response = self.completion_with_retry(
    436     messages=message_dicts, run_manager=run_manager, **params
    437 )
    438 return self._create_chat_result(response)

File /opt/conda/envs/agent/lib/python3.9/site-packages/langchain_community/chat_models/openai.py:352, in ChatOpenAI.completion_with_retry(self, run_manager, **kwargs)
    350 """Use tenacity to retry the completion call."""
    351 if is_openai_v1():
--> 352     return self.client.create(**kwargs)
    354 retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
    356 @retry_decorator
    357 def _completion_with_retry(**kwargs: Any) -> Any:

File /opt/conda/envs/agent/lib/python3.9/site-packages/openai/_utils/_utils.py:271, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
    269     msg = f"Missing required argument: {quote(missing[0])}"
    270     raise TypeError(msg)
--> 271 return func(*args, **kwargs)

TypeError: create() got an unexpected keyword argument 'tags'
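Reading the traceback, the problem looks like this: BaseChatModel.invoke always forwards tags, metadata and run_name into generate_prompt; LangChainChat.generate stashes every extra kwarg in self.tmp_args and later replays them into ChatOpenAI._generate, from where they reach the OpenAI client's create() call, which rejects tags. If that reading is right, stripping those tracing-only kwargs in a small subclass works around it. This is an untested sketch against gptcache 0.1.43, and PatchedLangChainChat is my own name for it:

from typing import Any, List, Optional

from gptcache.adapter.langchain_models import LangChainChat
from langchain_core.messages import BaseMessage
from langchain_core.outputs import LLMResult


class PatchedLangChainChat(LangChainChat):
    # tags/metadata/run_name configure LangChain's callback tracing; they are
    # not model parameters, so drop them before the adapter stores the kwargs
    # in tmp_args and replays them into the OpenAI client.
    def generate(
        self,
        messages: List[List[BaseMessage]],
        stop: Optional[List[str]] = None,
        callbacks: Any = None,
        **kwargs: Any,
    ) -> LLMResult:
        for key in ("tags", "metadata", "run_name"):
            kwargs.pop(key, None)
        return super().generate(messages, stop=stop, callbacks=callbacks, **kwargs)


# The chain would then be built with the patched wrapper:
# llm_cache = PatchedLangChainChat(chat=llm)

A proper fix in gptcache would presumably do the same filtering (or pass those arguments through to super().generate, which does accept them) inside LangChainChat itself.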