langchain-ai / langgraph

Build resilient language agents as graphs.
https://langchain-ai.github.io/langgraph/

Issue with running customer-support.ipynb with OpenAI Chat Model #494

Closed: arihant-jha closed this issue 1 month ago

arihant-jha commented 1 month ago


Example Code


from langchain_core.runnables import Runnable, RunnableLambda
from langchain_core.messages import ToolMessage

from langgraph.prebuilt import ToolNode

def handle_tool_error(state) -> dict:
    """Surface the captured tool error as ToolMessages so the model can self-correct."""
    error = state.get("error")
    tool_calls = state["messages"][-1].tool_calls
    return {
        "messages": [
            ToolMessage(
                content=f"Error: {repr(error)}\n please fix your mistakes.",
                tool_call_id=tc["id"],
            )
            for tc in tool_calls
        ]
    }

def create_tool_node_with_fallback(tools: list) -> Runnable:
    # Wrap the ToolNode with a fallback so tool exceptions are passed to
    # handle_tool_error (under the "error" key) instead of crashing the graph.
    return ToolNode(tools).with_fallbacks(
        [RunnableLambda(handle_tool_error)], exception_key="error"
    )

def _print_event(event: dict, _printed: set, max_length=1500):
    current_state = event.get("dialog_state")
    if current_state:
        print(f"Currently in: ", current_state[-1])
    message = event.get("messages")
    if message:
        if isinstance(message, list):
            message = message[-1]
        if message.id not in _printed:
            msg_repr = message.pretty_repr(html=True)
            if len(msg_repr) > max_length:
                msg_repr = msg_repr[:max_length] + " ... (truncated)"
            print(msg_repr)
            _printed.add(message.id)

from typing import Annotated

from typing_extensions import TypedDict

from langgraph.graph.message import AnyMessage, add_messages

class State(TypedDict):
    messages: Annotated[list[AnyMessage], add_messages]

from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnableConfig

class Assistant:
    def __init__(self, runnable: Runnable):
        self.runnable = runnable

    def __call__(self, state: State, config: RunnableConfig):
        while True:
            # Configured values live under the "configurable" key of the config.
            configuration = config.get("configurable", {})
            passenger_id = configuration.get("passenger_id", None)
            state = {**state, "user_info": passenger_id}
            result = self.runnable.invoke(state)
            # If the LLM happens to return an empty response, we will re-prompt it
            # for an actual response.
            if not result.tool_calls and (
                not result.content
                or isinstance(result.content, list)
                and not result.content[0].get("text")
            ):
                messages = state["messages"] + [("user", "Respond with a real output.")]
                state = {**state, "messages": messages}
            else:
                break
        return {"messages": result}

# Haiku is faster and cheaper, but less accurate
# llm = ChatAnthropic(model="claude-3-haiku-20240307")
# llm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=1)
# You could swap LLMs, though you will likely want to update the prompts when
# doing so!
from datetime import datetime

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-4-turbo",
    temperature=0,
    streaming=True,
)

primary_assistant_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful customer support assistant for Swiss Airlines. "
            " Use the provided tools to search for flights, company policies, and other information to assist the user's queries. "
            " When searching, be persistent. Expand your query bounds if the first search returns no results. "
            " If a search comes up empty, expand your search before giving up."
            "\n\nCurrent user:\n<User>\n{user_info}\n</User>"
            "\nCurrent time: {time}.",
        ),
        ("placeholder", "{messages}"),
    ]
).partial(time=datetime.now())

# The flight, policy, car-rental, hotel, and excursion tools below are
# defined in earlier cells of the tutorial notebook.
part_1_tools = [
    TavilySearchResults(max_results=1),
    fetch_user_flight_information,
    search_flights,
    lookup_policy,
    update_ticket_to_new_flight,
    cancel_ticket,
    search_car_rentals,
    book_car_rental,
    update_car_rental,
    cancel_car_rental,
    search_hotels,
    book_hotel,
    update_hotel,
    cancel_hotel,
    search_trip_recommendations,
    book_excursion,
    update_excursion,
    cancel_excursion,
]
part_1_assistant_runnable = primary_assistant_prompt | llm.bind_tools(part_1_tools)

from langgraph.checkpoint.sqlite import SqliteSaver
from langgraph.graph import END, StateGraph
from langgraph.prebuilt import ToolNode, tools_condition

builder = StateGraph(State)

# Define nodes: these do the work
builder.add_node("assistant", Assistant(part_1_assistant_runnable))
builder.add_node("action", create_tool_node_with_fallback(part_1_tools))
# Define edges: these determine how the control flow moves
builder.set_entry_point("assistant")
builder.add_conditional_edges(
    "assistant",
    tools_condition,
    # "action" calls one of our tools. END causes the graph to terminate (and respond to the user)
    {"action": "action", END: END},
)
builder.add_edge("action", "assistant")

# The checkpointer lets the graph persist its state
# this is a complete memory for the entire graph.
memory = SqliteSaver.from_conn_string(":memory:")
part_1_graph = builder.compile(checkpointer=memory)

import shutil
import uuid

# Let's create an example conversation a user might have with the assistant
tutorial_questions = [
    "Hi there, what time is my flight?",
    "Am i allowed to update my flight to something sooner? I want to leave later today.",
    "Update my flight to sometime next week then",
    "The next available option is great",
    "what about lodging and transportation?",
    "Yeah i think i'd like an affordable hotel for my week-long stay (7 days). And I'll want to rent a car.",
    "OK could you place a reservation for your recommended hotel? It sounds nice.",
    "yes go ahead and book anything that's moderate expense and has availability.",
    "Now for a car, what are my options?",
    "Awesome let's just get the cheapest option. Go ahead and book for 7 days",
    "Cool so now what recommendations do you have on excursions?",
    "Are they available while I'm there?",
    "interesting - i like the museums, what options are there? ",
    "OK great pick one and book it for my second day there.",
]

# Update with the backup file so we can restart from the original place in each
# section (backup_file and db are created in the tutorial's database-setup cell)
shutil.copy(backup_file, db)
thread_id = str(uuid.uuid4())

config = {
    "configurable": {
        # The passenger_id is used in our flight tools to
        # fetch the user's flight information
        "passenger_id": "3442 587242",
        # Checkpoints are accessed by thread_id
        "thread_id": thread_id,
    }
}

part_1_graph.invoke({"messages": ("user", tutorial_questions[0])}, config)

# _printed = set()
# for question in tutorial_questions:
#     events = part_1_graph.stream(
#         {"messages": ("user", question)}, config, stream_mode="values"
#     )
#     for event in events:
#         _print_event(event, _printed)

Error Message and Stack Trace (if applicable)


KeyError                                  Traceback (most recent call last)
Cell In[73], line 36
     24 thread_id = str(uuid.uuid4())
     26 config = {
     27     "configurable": {
     28         # The passenger_id is used in our flight tools to
    (...)
     33     }
     34 }
---> 36 part_1_graph.invoke({"messages": ("user", tutorial_questions[0])}, config)

File ~/miniconda3/envs/asr/lib/python3.12/site-packages/langgraph/pregel/__init__.py:1245, in Pregel.invoke(self, input, config, stream_mode, output_keys, input_keys, interrupt_before, interrupt_after, debug, **kwargs)
-> 1245 for chunk in self.stream(

File ~/miniconda3/envs/asr/lib/python3.12/site-packages/langgraph/pregel/__init__.py:834, in Pregel.stream(self, input, config, stream_mode, output_keys, input_keys, interrupt_before, interrupt_after, debug)
    833 # panic on failure or timeout
--> 834 _panic_or_proceed(done, inflight, step)

File ~/miniconda3/envs/asr/lib/python3.12/site-packages/langgraph/pregel/__init__.py:1334, in _panic_or_proceed(done, inflight, step)
    1333 # raise the exception
-> 1334 raise exc

File ~/miniconda3/envs/asr/lib/python3.12/concurrent/futures/thread.py:58, in _WorkItem.run(self)
---> 58 result = self.fn(*self.args, **self.kwargs)

File ~/miniconda3/envs/asr/lib/python3.12/site-packages/langgraph/pregel/retry.py:66, in run_with_retry(task, retry_policy)
     65 # run the task
---> 66 task.proc.invoke(task.input, task.config)

File ~/miniconda3/envs/asr/lib/python3.12/site-packages/langchain_core/runnables/base.py:2368, in RunnableSequence.invoke(self, input, config)
-> 2368 input = step.invoke(

File ~/miniconda3/envs/asr/lib/python3.12/site-packages/langgraph/utils.py:89, in RunnableCallable.invoke(self, input, config)
---> 89 ret = context.run(self.func, input, **kwargs)

File ~/miniconda3/envs/asr/lib/python3.12/site-packages/langgraph/graph/graph.py:75, in Branch._route(self, input, config, reader, writer)
     74 if self.ends:
---> 75     destinations = [self.ends[r] for r in result]

KeyError: 'tools'

Description

I'm trying to run the customer-support tutorial with the OpenAI chat model, but it produces the error above. The tutorial was originally written for the Anthropic chat model, but it also includes a commented-out snippet for running the same code with GPT, and that is what triggers this error. The code looks correct to me: the tools are bound in the assistant runnable, and the agent does return responses for simple questions that don't use tools.
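
For what it's worth, the failing frame is Branch._route looking up the value returned by tools_condition in the mapping passed to add_conditional_edges. A minimal sketch of the apparent mismatch, assuming the current tools_condition returns the literal string "tools" when the last message contains tool calls:

# tools_condition routes to "tools", but the mapping below only knows
# "action", so the lookup `self.ends[r]` in Branch._route raises
# KeyError: 'tools'.
builder.add_conditional_edges(
    "assistant",
    tools_condition,
    {"action": "action", END: END},  # no "tools" key here
)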

System Info

langchain==0.2.0
langchain-anthropic==0.1.13
langchain-community==0.2.0
langchain-core==0.2.0
langchain-openai==0.1.7
langchain-text-splitters==0.2.0
langgraph==0.0.50

Platform: macOS, Python 3.12.3

wadoodba commented 1 month ago

I am facing the same issue; is there any update on it? I also tried with Anthropic, but the same error occurs.

System info:
langchain==0.1.8
langchain-anthropic==0.1.13
langchain-community==0.0.38
langchain-core==0.1.52
langchain-openai==0.1.7
langchain-text-splitters==0.2.0
langgraph==0.0.50
langsmith==0.1.29

Platform: Windows (anaconda env), Python 3.11.8

Error message:

================================ Human Message =================================

Hi there, what time is my flight?

KeyError                                  Traceback (most recent call last)
Cell In[18], line 45
     41 for question in tutorial_questions:
     42     events = part_1_graph.stream(
     43         {"messages": ("user", question)}, config, stream_mode="values"
     44     )
---> 45     for event in events:
     46         _print_event(event, _printed)

File c:\Users\User\.conda\envs\ragui\Lib\site-packages\langgraph\pregel\__init__.py:834, in Pregel.stream(self, input, config, stream_mode, output_keys, input_keys, interrupt_before, interrupt_after, debug)
    833 # panic on failure or timeout
--> 834 _panic_or_proceed(done, inflight, step)

File c:\Users\User\.conda\envs\ragui\Lib\site-packages\langgraph\pregel\__init__.py:1334, in _panic_or_proceed(done, inflight, step)
    1333 # raise the exception
-> 1334 raise exc

File c:\Users\User\.conda\envs\ragui\Lib\concurrent\futures\thread.py:58, in _WorkItem.run(self)
---> 58 result = self.fn(*self.args, **self.kwargs)

File c:\Users\User\.conda\envs\ragui\Lib\site-packages\langgraph\pregel\retry.py:66, in run_with_retry(task, retry_policy)
     65 # run the task
---> 66 task.proc.invoke(task.input, task.config)

File c:\Users\User\.conda\envs\ragui\Lib\site-packages\langchain_core\runnables\base.py:2499, in RunnableSequence.invoke(self, input, config)
-> 2499 input = step.invoke(

File c:\Users\User\.conda\envs\ragui\Lib\site-packages\langgraph\utils.py:89, in RunnableCallable.invoke(self, input, config)
---> 89 ret = context.run(self.func, input, **kwargs)

File c:\Users\User\.conda\envs\ragui\Lib\site-packages\langgraph\graph\graph.py:75, in Branch._route(self, input, config, reader, writer)
     74 if self.ends:
---> 75     destinations = [self.ends[r] for r in result]

File c:\Users\User\.conda\envs\ragui\Lib\site-packages\langgraph\graph\graph.py:75, in <listcomp>(.0)
     74 if self.ends:
---> 75     destinations = [self.ends[r] for r in result]

KeyError: 'tools'

hinthornw commented 1 month ago

Pushed a fix to the notebook! Last week we updated tools_condition to return the node name "tools" (since that's a better long-term home for it), but we hadn't yet updated the notebook to label the nodes "tools" instead of "action".
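
For anyone hitting this on an older copy of the notebook, the change amounts to renaming the tool node so it matches what tools_condition now returns. A sketch against the Part 1 graph above:

# Name the node "tools" to match the value tools_condition routes to.
builder.add_node("tools", create_tool_node_with_fallback(part_1_tools))
builder.add_conditional_edges(
    "assistant",
    tools_condition,
    {"tools": "tools", END: END},
)
builder.add_edge("tools", "assistant")

Since the node name now matches the routed value, you can also drop the mapping argument entirely and let tools_condition route straight to "tools".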

arihant-jha commented 1 month ago

Working now <3 Thanks for the prompt fix!

hinthornw commented 1 month ago

Awesome! Keep the feedback coming!