Open ccq1 opened 2 days ago
This is a potential solution to the issue.
@ccq1, there were a couple of issues with your code which I have corrected; please have a look at the updated code below.
from langchain_core.tools import tool
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
from langgraph.constants import START, END
from langgraph.graph import StateGraph, MessagesState
from langgraph.prebuilt import ToolNode
import json
llm = ChatOpenAI()
@tool
def check_code_syntax(code:str) -> str: # your tool
"""Check Code Syntax"""
# assuming this tool does something
return code
prompt_system = """
You are a transcription engineer. I will provide you with some code,
and you need to transcribe it into Python. You must use the check_code_syntax tool.
If the last tool execution resulted in a code error, you need to correct the code based on the error
message and then invoke the check_code_syntax tool again to see if it can compile successfully.
If it passes the check, meaning the return value is_success is True, you should output TASK_END + the corrected code.
"""
tools = [check_code_syntax]
def create_agent():
prompt = ChatPromptTemplate.from_messages(
[
("system", prompt_system),
MessagesPlaceholder(variable_name='user')
]
)
agent = prompt | llm.bind_tools(tools) # your prompt does not have {tool_name variable therefore no need to bind their}
return agent
rewrite_agent = create_agent()
def should_continue(state: MessagesState) -> bool:
"""Return the next node to execute."""
last_message = state["messages"][-1]
# If there is no function call, then we finish
if not last_message.tool_calls:
return END
# Otherwise if there is, we continue
args = last_message.tool_calls[0]['args']
if not isinstance(args, dict):
args = json.loads(args)
return "tools"
def chatbot(state: MessagesState):
return {'messages': rewrite_agent.invoke(state["messages"])} # This should return in form MessageState
workflow = StateGraph(MessagesState)
workflow.add_node('tools', ToolNode(tools))
workflow.add_node('rewrite_agent', chatbot)
workflow.add_edge(START, 'rewrite_agent')
workflow.add_edge('tools', 'rewrite_agent')
workflow.add_conditional_edges('rewrite_agent', should_continue, ['tools', END])
graph = workflow.compile()
graph.invoke({"messages": [('human', 'Please check the following code for me using `check_code_syntax`: lambda x:x')]}) # graph should be called rather the agent in the graph
Checked other resources
Example Code
prompt_system = """ You are a transcription engineer. I will provide you with some code, and you need to transcribe it into Python. You must use the check_code_syntax tool. If the last tool execution resulted in a code error, you need to correct the code based on the error message and then invoke the check_code_syntax tool again to see if it can compile successfully. If it passes the check, meaning the return value is_success is True, you should output TASK_END + the corrected code. """ tools = [check_code_syntax] def create_agent(): prompt = ChatPromptTemplate.from_messages( [ ( "system", prompt_system, ), MessagesPlaceholder(variable_name='user') ] ) prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools])) agent = prompt | llm.bind_tools(tools) return agent
rewrite_agent = create_agent()
def should_continue(state:MessagesState) -> bool: """Return the next node to execute.""" last_message = state["messages"][-1]
If there is no function call, then we finish
def chatbot(state: State): return [rewrite_agent.invoke(state["messages"])]
workflow = StateGraph(MessagesState) workflow.add_node('tools', ToolNode(tools)) workflow.add_node('rewrite_agent', chatbot) workflow.add_edge(START, 'rewrite_agent') workflow.add_edge('tools','rewrite_agent') workflow.add_conditional_edges('rewrite_agent',should_continue,['tools',END])
graph = workflow.compile()
Error Message and Stack Trace (if applicable)
[Error Message, I add some print for debug]
================================ Human Message =================================
code is def add(a, b): prnt(a+b) add(3, 5)
_dict {'content': '', 'refusal': None, 'role': 'assistant', 'function_call': None, 'tool_calls': [{'id': '0192c19eef3bb5786799c95962fe59d0', 'function': {'arguments': '{"code": "def add(a, b):\n prnt(a+b)\nadd(3, 5)", "language": "python"}', 'name': 'check_code_syntax'}, 'type': 'function'}]} role assistant name None id None raw_tool_call {'id': '0192c19eef3bb5786799c95962fe59d0', 'function': {'arguments': '{"code": "def add(a, b):\n prnt(a+b)\nadd(3, 5)", "language": "python"}', 'name': 'check_code_syntax'}, 'type': 'function'} toolcalls [] ================================== Ai Message ================================== Tool Calls: check_code_syntax (0192c19eef3bb5786799c95962fe59d0) Call ID: 0192c19eef3bb5786799c95962fe59d0 Args: code: def add(a, b): prnt(a+b) add(3, 5) language: python ================================= Tool Message ================================= Name: check_code_syntax
FalseTraceback (most recent call last): File "/tmp/tmp4ljh8rbe", line 3, in
add(3, 5)
File "/tmp/tmp4ljh8rbe", line 2, in add
prnt(a+b)
^^^^
NameError: name 'prnt' is not defined. Did you mean: 'print'?
_dict {'content': "It looks like there's a typo in the code. The function
for chunk in graph.stream({"messages": [inputmessage]},
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langgraph/pregel/init.py", line 1298, in stream
for in runner.tick(
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langgraph/pregel/runner.py", line 56, in tick
run_with_retry(t, retry_policy)
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langgraph/pregel/retry.py", line 29, in run_with_retry
task.proc.invoke(task.input, config)
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langgraph/utils/runnable.py", line 409, in invoke
input = context.run(step.invoke, input, config, kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langgraph/utils/runnable.py", line 183, in invoke
ret = context.run(self.func, input, kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/son1enardo/nj_rewrite/re_write_agent.py", line 141, in chatbot
return [rewrite_agent.invoke(state["messages"])]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langchain_core/runnables/base.py", line 3024, in invoke
input = context.run(step.invoke, input, config)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langchain_core/runnables/base.py", line 5354, in invoke
return self.bound.invoke(
^^^^^^^^^^^^^^^^^^
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 286, in invoke
self.generate_prompt(
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 786, in generate_prompt
return self.generate(prompt_messages, stop=stop, callbacks=callbacks, kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 643, in generate
raise e
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 633, in generate
self._generate_with_cache(
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 851, in _generate_with_cache
result = self._generate(
^^^^^^^^^^^^^^^
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langchain_openai/chat_models/base.py", line 690, in _generate
return self._create_chat_result(response, generation_info)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langchain_openai/chat_models/base.py", line 727, in _create_chat_result
message = _convert_dict_to_message(res["message"])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langchain_openai/chat_models/base.py", line 138, in _convert_dict_to_message
return AIMessage(
^^^^^^^^^^
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langchain_core/messages/ai.py", line 179, in init
super().init(content=content, kwargs)
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langchain_core/messages/base.py", line 76, in init
super().init(content=content, *kwargs)
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/langchain_core/load/serializable.py", line 111, in init
super().init(args, **kwargs)
File "/home/son1enardo/miniconda3/envs/langgraph/lib/python3.11/site-packages/pydantic/main.py", line 212, in init
validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
pydantic_core._pydantic_core.ValidationError: 1 validation error for AIMessage
tool_calls.0.args
Input should be a valid dictionary [type=dict_type, input_value='{"code": "def add(a, b):..., "language": "python"}', input_type=str]
For further information visit https://errors.pydantic.dev/2.9/v/dict_type
prnt
should be `print`
. Let me correct that and check the code again.\n", 'refusal': None, 'role': 'assistant', 'function_call': None, 'tool_calls': [{'id': '0192c19efcad36cfccc5be45121408cf', 'function': {'arguments': '"{\"code\": \"def add(a, b):\\n print(a+b)\\nadd(3, 5)\", \"language\": \"python\"}"', 'name': 'check_code_syntax'}, 'type': 'function'}]} role assistant name None id None raw_tool_call {'id': '0192c19efcad36cfccc5be45121408cf', 'function': {'arguments': '"{\"code\": \"def add(a, b):\\n print(a+b)\\nadd(3, 5)\", \"language\": \"python\"}"', 'name': 'check_code_syntax'}, 'type': 'function'} toolcalls [] Traceback (most recent call last): File "/home/son1enardo/nj_rewrite/re_write_agent.py", line 167, inDescription
The LLM returns two AIMessage objects, but there is inconsistency in how tool_call.0.args is structured between them. In the first AIMessage, the tool_call.0.args is returned correctly as expected, like this:
{'arguments': '{"code": "def add(a, b):\\n prnt(a+b)\\nadd(3, 5)", "language": "python"}', 'name': 'check_code_syntax'}
However, in the second AIMessage, the tool_call.0.args is returned as a nested, escaped string, like this:{'arguments': '"{\\"code\\": \\"def add(a, b):\\\\n print(a+b)\\\\nadd(3, 5)\\", \\"language\\": \\"python\\"}"'}
This discrepancy occurs without any additional processing on the returned results, and the second message causes issues due to the additional escaping, which requires further parsing to get the correct dictionary structure.System Info
System Information
Package Information
Optional packages not installed
Other Dependencies