langchain-ai / langchain

🦜🔗 Build context-aware reasoning applications
https://python.langchain.com
MIT License
93.09k stars 14.97k forks source link

Creating proxy using ChatTongyi, unable to return results properly #22351

Closed wangyaoyong-wyy closed 2 weeks ago

wangyaoyong-wyy commented 4 months ago

Checked other resources

Example Code

import os

from langchain_chroma import Chroma
from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_community.document_loaders import RecursiveUrlLoader
from langchain_community.document_transformers import Html2TextTransformer
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool, create_retriever_tool
# BUG FIX: RecursiveCharacterTextSplitter was used below but never imported,
# so the script raised NameError before reaching the reported Tongyi error.
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langgraph.prebuilt import chat_agent_executor

from conf.configs import DASHSCOPE_API_KEY

# DashScope credentials must be in the environment before any DashScope call.
os.environ["DASHSCOPE_API_KEY"] = DASHSCOPE_API_KEY

# Crawl the LangChain v0.2 docs and convert the HTML pages to plain text.
url = "https://python.langchain.com/v0.2/docs/versions/v0_2/"
loader = RecursiveUrlLoader(url=url, max_depth=100)
docs = loader.load()
html2text = Html2TextTransformer()
docs_transformed = html2text.transform_documents(docs)

# Chunk the pages for embedding: 200-char chunks with 50-char overlap.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=50)
docs = text_splitter.split_documents(docs_transformed)

# BUG FIX: use a raw string for the Windows path — "D:\ollama" relies on "\o"
# not being an escape sequence and emits a DeprecationWarning; r"..." is safe
# for any path (e.g. r"D:\new" would otherwise contain a newline).
db = Chroma.from_documents(docs, DashScopeEmbeddings(), persist_directory=r"D:\ollama")
retriever = db.as_retriever()
langchain_search = create_retriever_tool(
    retriever, "langchain_search", "Return knowledge related to Langchain"
)

tools = [langchain_search]
chat = ChatTongyi(streaming=True)

# Build a prebuilt tool-calling agent (ReAct-style) around the Tongyi model.
agent_executor = chat_agent_executor.create_tool_calling_executor(chat, tools)
query = "When was Langchain0.2 released?"

# Stream intermediate agent/tool steps as they are produced.
for s in agent_executor.stream(
    {"messages": [HumanMessage(content=query)]},
):
    print(s)
    print("----")

Error Message and Stack Trace (if applicable)

D:\miniconda3\envs\chat2\python.exe D:\pythonProject\chat2\langchain_agent_create.py {'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'type': 'function', 'function': {'name': 'langchain_search', 'arguments': ''}, 'id': ''}, {'type': 'function', 'function': {'name': '', 'arguments': '{"query": "'}, 'id': ''}, {'type': 'function', 'function': {'name': '', 'arguments': 'Langchain 0.2 version release'}, 'id': ''}, {'type': 'function', 'function': {'name': '', 'arguments': ' date"}'}, 'id': ''}, {'type': 'function', 'function': {'name': '', 'arguments': ''}, 'id': ''}]}, response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'tool_calls', 'request_id': 'c426dbd5-a597-91a0-9ec4-a55b2591fed1', 'token_usage': {'input_tokens': 189, 'output_tokens': 26, 'total_tokens': 215}}, id='run-13fd4707-8439-4431-9dad-817894f4c3e7-0', tool_calls=[{'name': 'langchain_search', 'args': {'query': 'Langchain 0.2 version release date'}, 'id': ''}])]}}

{'tools': {'messages': [ToolMessage(content='Skip to main content\n\nLangChain 0.2 is out! Leave feedback on the v0.2 docs here. You can view the\nv0.1 docs here.\n\nIntegrationsAPI Reference\n\nMore\n\nSkip to main content\n\nLangChain 0.2 is out! Leave feedback on the v0.2 docs here. You can view the\nv0.1 docs here.\n\nIntegrationsAPI Reference\n\nMore\n\nSkip to main content\n\nLangChain 0.2 is out! Leave feedback on the v0.2 docs here. You can view the\nv0.1 docs here.\n\nIntegrationsAPI Reference\n\nMore\n\n LangChain v0.2\n astream_events v2\n Changes\n Security\n\n Versions\n * v0.2\n\nOn this page\n\n# LangChain v0.2', name='langchain_search', id='28ffa364-791c-488e-9020-1960c4a5672b', tool_call_id='')]}}

Traceback (most recent call last): File "D:\pythonProject\chat2\langchain_agent_create.py", line 49, in for s in agent_executor.stream( File "D:\miniconda3\envs\chat2\Lib\site-packages\langgraph\pregel__init.py", line 876, in stream _panic_or_proceed(done, inflight, step) File "D:\miniconda3\envs\chat2\Lib\site-packages\langgraph\pregel__init__.py", line 1422, in _panic_or_proceed raise exc File "D:\miniconda3\envs\chat2\Lib\concurrent\futures\thread.py", line 58, in run result = self.fn(*self.args, self.kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "D:\miniconda3\envs\chat2\Lib\site-packages\langgraph\pregel\retry.py", line 66, in run_with_retry task.proc.invoke(task.input, task.config) File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_core\runnables\base.py", line 2393, in invoke input = step.invoke( ^^^^^^^^^^^^ File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_core\runnables\base.py", line 3857, in invoke return self._call_with_config( ^^^^^^^^^^^^^^^^^^^^^^^ File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_core\runnables\base.py", line 1503, in _call_with_config context.run( File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_core\runnables\config.py", line 346, in call_func_with_variable_args return func(input, kwargs) # type: ignore[call-arg] ^^^^^^^^^^^^^^^^^^^^^ File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_core\runnables\base.py", line 3731, in _invoke output = call_func_with_variable_args( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_core\runnables\config.py", line 346, in call_func_with_variable_args return func(input, kwargs) # type: ignore[call-arg] ^^^^^^^^^^^^^^^^^^^^^ File "D:\miniconda3\envs\chat2\Lib\site-packages\langgraph\prebuilt\chat_agent_executor.py", line 403, in call_model response = model_runnable.invoke(messages, config) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_core\runnables\base.py", 
line 4427, in invoke return self.bound.invoke( ^^^^^^^^^^^^^^^^^^ File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_core\language_models\chat_models.py", line 170, in invoke self.generate_prompt( File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_core\language_models\chat_models.py", line 599, in generate_prompt return self.generate(prompt_messages, stop=stop, callbacks=callbacks, kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_core\language_models\chat_models.py", line 456, in generate raise e File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_core\language_models\chat_models.py", line 446, in generate self._generate_with_cache( File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_core\language_models\chat_models.py", line 671, in _generate_with_cache result = self._generate( ^^^^^^^^^^^^^^^ File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_community\chat_models\tongyi.py", line 440, in _generate for chunk in self._stream( File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_community\chat_models\tongyi.py", line 512, in _stream for stream_resp, is_last_chunk in generate_with_last_element_mark( File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_community\llms\tongyi.py", line 135, in generate_with_last_element_mark item = next(iterator) ^^^^^^^^^^^^^^ File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_community\chat_models\tongyi.py", line 361, in _stream_completion_with_retry yield check_response(delta_resp) ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "D:\miniconda3\envs\chat2\Lib\site-packages\langchain_community\llms\tongyi.py", line 66, in check_response raise HTTPError( ^^^^^^^^^^ File "D:\miniconda3\envs\chat2\Lib\site-packages\requests\exceptions.py", line 22, in init if response is not None and not self.request and hasattr(response, "request"): ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File 
"D:\miniconda3\envs\chat2\Lib\site-packages\dashscope\api_entities\dashscope_response.py", line 59, in getattr__ return self[attr]


  File "D:\miniconda3\envs\chat2\Lib\site-packages\dashscope\api_entities\dashscope_response.py", line 15, in __getitem__
    return super().__getitem__(key)
           ^^^^^^^^^^^^^^^^^^^^^^^^
KeyError: 'request'
Exception ignored in: <generator object HttpRequest._handle_request at 0x0000013002FBB240>
RuntimeError: generator ignored GeneratorExit

### Description

I am using ChatTongyi to create a proxy for RAG Q&A, but the code is not executing properly. The document I am referring to is: https://python.langchain.com/v0.2/docs/tutorials/qa_chat_history/#agents

### System Info

python:3.11.9
langchain:0.2.1
platform:windows11
KennyCaty commented 4 months ago

I encountered the same problem using Tongyi The code used is: prompt = hub.pull("hwchase17/openai-tools-agent") from langchain.agents import create_tool_calling_agent

wangyaoyong-wyy commented 4 months ago

I also encountered this issue while using ChatTongyi. After stepping through the source code with a breakpoint, I found the message "Multiple tool_calls are not supported in message; this feature will be supported in the future." Perhaps Tongyi does not currently support multiple tool calls in a single message?

wangyaoyong-wyy commented 3 months ago

I encountered the same problem using Tongyi. The code used is: prompt = hub.pull("hwchase17/openai-tools-agent") from langchain.agents import create_tool_calling_agent

This issue may occur when using create_tool_calling_agent. According to the error code documented on the DashScope official website, it is caused by passing incorrect parameters. If you switch to create_json_chat_agent, the issue does not occur.

# Workaround: drive Tongyi through a JSON chat agent instead of the
# tool-calling agent, which avoids the malformed tool_calls payload.
from langchain import hub
from langchain.agents import AgentExecutor, create_json_chat_agent

# Pull the standard ReAct JSON chat prompt from the LangChain hub.
prompt = hub.pull("hwchase17/react-chat-json")
# NOTE(review): assumes `chat` (a ChatTongyi instance) and `tools` are
# already defined as in the reproduction script above.
agent = create_json_chat_agent(chat, tools, prompt)
# agent = create_tool_calling_agent(chat,tools,prompt)  # fails with Tongyi
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
resp = agent_executor.invoke({"input": "what is LangChain latest version?"})
print(resp)