Open naturesh opened 1 day ago
@naturesh could you please provide a full reproducible code snippet? What is `tool_calling_agent`?
from typing import Annotated, List
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnableConfig
from langgraph.graph.message import AnyMessage, add_messages
from typing_extensions import TypedDict
class State(TypedDict):
    # Graph state: the running conversation. The add_messages reducer
    # merges newly returned messages into the existing list instead of
    # replacing it, so history accumulates across node invocations.
    messages: Annotated[list[AnyMessage], add_messages]
class Assistant:
    """Graph node wrapping an LLM runnable; re-prompts until it gets a real answer."""

    def __init__(self, runnable: Runnable):
        """Store the runnable (e.g. prompt | model) this node will invoke.

        Args:
            runnable (Runnable): The runnable instance to invoke.
        """
        self.runnable = runnable

    def __call__(self, state: State, config: RunnableConfig):
        """Invoke the LLM and loop until the response is usable.

        A response counts as empty when it has no tool calls and either no
        content at all, or a list-shaped content whose first part carries no
        "text". On an empty response, a nudge message is appended and the
        model is invoked again.

        Args:
            state (State): The current state containing messages.
            config (RunnableConfig): The configuration for the runnable.

        Returns:
            dict: State update holding the model's final message.
        """
        while True:
            result = self.runnable.invoke(state)  # Invoke the LLM
            if result.tool_calls:
                break  # a tool call is always a meaningful response
            content = result.content
            has_text = bool(content) and not (
                isinstance(content, list) and not content[0].get("text")
            )
            if has_text:
                break
            # Empty answer: ask the model to try again with a real output.
            nudged = state["messages"] + [("user", "Respond with a real output.")]
            state = {**state, "messages": nudged}
        return {"messages": result}
# Create the primary assistant prompt template.
# The system instructions are kept in a named constant so the prompt
# structure below stays easy to scan.
_SYSTEM_INSTRUCTIONS = (
    "You are a helpful assistant tasked with answering user questions. "
    "You have access to two tools: retrieve_documents and web_search. "
    "For any user questions about LLM agents, use the retrieve_documents tool to get information for a vectorstore. "
    "For any other questions, such as questions about current events, use the web_search tool to get information from the web. "
)
primary_assistant_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", _SYSTEM_INSTRUCTIONS),
        # The running conversation is injected here at invoke time.
        ("placeholder", "{messages}"),
    ]
)
# Prompt our LLM and bind tools: the prompt is piped into the model, and
# bind_tools exposes the tool schemas so the model can emit tool calls.
# NOTE(review): `llm` and `tools` are assumed to be defined earlier in the
# notebook — they are not visible in this snippet.
assistant_runnable = primary_assistant_prompt | llm.bind_tools(tools)
from langchain_core.messages import ToolMessage
from langchain_core.runnables import RunnableLambda
from langgraph.prebuilt import ToolNode
def create_tool_node_with_fallback(tools: list) -> Runnable:
    """Build a tool-executing node that reports tool errors back to the model.

    Args:
        tools: The tools the node may execute.

    Returns:
        A runnable ToolNode that, when any tool raises, falls back to
        ``handle_tool_error`` (the exception is injected into the state
        under the ``"error"`` key).
    """
    # Fix: the original annotated the return type as `dict`, but
    # `with_fallbacks` returns a Runnable (imported at the top of the file).
    return ToolNode(tools).with_fallbacks(
        [RunnableLambda(handle_tool_error)], exception_key="error"
    )
def handle_tool_error(state: State) -> dict:
    """Fallback handler: convert a tool exception into ToolMessages.

    One ToolMessage is emitted per pending tool call on the latest message,
    so the model sees the failure and can correct itself on the next turn.

    Args:
        state: Fallback state; the exception arrives under the "error" key.

    Returns:
        dict: State update containing the error ToolMessages.
    """
    error = state.get("error")
    last_message = state["messages"][-1]
    error_messages = []
    for call in last_message.tool_calls:
        error_messages.append(
            ToolMessage(
                content=f"Error: {repr(error)}\n please fix your mistakes.",
                tool_call_id=call["id"],
            )
        )
    return {"messages": error_messages}
from IPython.display import Image, display
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, StateGraph
from langgraph.prebuilt import tools_condition
# Graph: wire the assistant and tool nodes into a ReAct-style loop.
builder = StateGraph(State)
# Define nodes: these do the work
builder.add_node("assistant", Assistant(assistant_runnable))
builder.add_node("tools", create_tool_node_with_fallback(tools))
# Define edges: these determine how the control flow moves
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
    "assistant",
    # If the latest message (result) from assistant is a tool call -> tools_condition routes to tools
    # If the latest message (result) from assistant is not a tool call -> tools_condition routes to END
    tools_condition,
)
# After tools run, control always returns to the assistant.
builder.add_edge("tools", "assistant")
# The checkpointer lets the graph persist its state across invocations
# that share the same thread_id.
memory = MemorySaver()
react_graph = builder.compile(checkpointer=memory)
# Show the compiled graph as a Mermaid diagram (notebook only).
display(Image(react_graph.get_graph(xray=True).draw_mermaid_png()))
this is my code ( langgraph tutorial example )
@naturesh i cannot reproduce the issue when invoking the graph from your example with postgres checkpointer, e.g.
config = {"configurable": {"thread_id": "2"}}
DB_URI = "postgres://postgres:postgres@localhost:5442/postgres?sslmode=disable"
with PostgresSaver.from_conn_string(DB_URI) as checkpointer:
react_graph = builder.compile(checkpointer=checkpointer)
res = react_graph.invoke({"messages": [("user", "hi")]}, config)
Have you called `PostgresSaver.setup()`? Also, which Postgres version are you running locally? We recommend Postgres >= 16.
also, i would recommend rerunning a notebook in a fresh virtual environment and see if the error is still there. let me know if you're still running into issues after that
Checked other resources
Example Code
Error Message and Stack Trace (if applicable)
Description
https://langchain-ai.github.io/langgraph/how-tos/persistence_postgres/
kwargs={ "autocommit": True, "prepare_threshold": 0, }
With the kwargs above, when `prepare_threshold` is a large number, the LLM agent responds normally, but errors occur after the tool is used.
If `prepare_threshold` is 0, the LLM agent produces no output and an error occurs.
System Info
langgraph==0.2.34 langgraph-checkpoint==2.0.0 langgraph-checkpoint-postgres==2.0.0
I am using Postgres in supabase project