langchain-ai / langgraph-studio

Desktop app for prototyping and debugging LangGraph applications locally.
https://studio.langchain.com

LangGraph Studio without Human Message ID #171

Open Nachoeigu opened 2 months ago

Nachoeigu commented 2 months ago

Example Code

import os
from dotenv import load_dotenv
import sys

load_dotenv()
WORKDIR=os.getenv("WORKDIR")
os.chdir(WORKDIR)
sys.path.append(WORKDIR)

import operator
from typing import Annotated, List, Literal

from pydantic import BaseModel, Field, validator
from typing_extensions import TypedDict

from langchain_core.messages import AnyMessage
from langgraph.graph import StateGraph, END
# Note: the classes imported here are redefined below, shadowing these imports.
from src.utils import State, GraphInput, GraphOutput, GraphConfig
from src.nodes import *   # assumed to provide answer_query and summarize_memory
from src.router import *  # assumed to provide define_next_step
# AVAILABLE_MODELS and CUSTOM_PROMPTS are assumed to come from the star imports above.

def defining_nodes(workflow: StateGraph):
    workflow.add_node("llm", answer_query)
    workflow.add_node("summarizing_memory", summarize_memory)

    return workflow

def defining_edges(workflow: StateGraph):
    workflow.add_conditional_edges(
        "llm",
        define_next_step)
    workflow.add_edge("summarizing_memory", END)

    return workflow

class GraphConfig(BaseModel):
    """
    Initial configuration to trigger the AI system.

    Attributes:
    - qa_model: Select the model for the LLM. Options include 'openai', 'google', 'meta', or 'amazon'.
    - system_prompt: Select the prompt of your conversation.
    - temperature: Select the temperature for the model. Options range from 0 to 1.
    - using_summary_in_memory: If you want to summarize previous messages, place True. Otherwise, False.
    """
    qa_model: Literal[*AVAILABLE_MODELS]
    system_prompt: Literal[*CUSTOM_PROMPTS.keys()]
    temperature: float = Field(ge=0, le=1)
    using_summary_in_memory: bool = False

    @validator("temperature")
    def check_temperature(cls, temperature: float):
        if temperature < 0.0 or temperature > 1.0:
            raise ValueError("Temperature should be between 0 and 1")

        return temperature

class State(TypedDict):
    messages: Annotated[List[AnyMessage], operator.add]
    summary: str

class GraphInput(TypedDict):
    """
    The initial message that starts the AI system
    """
    messages: Annotated[List[AnyMessage], operator.add]

class GraphOutput(TypedDict):
    """
    The output of the AI System
    """
    messages: List[AnyMessage]

workflow = StateGraph(State,
                      input = GraphInput,
                      output = GraphOutput,
                      config_schema = GraphConfig)

workflow.set_entry_point("llm")
workflow = defining_nodes(workflow = workflow)
workflow = defining_edges(workflow = workflow)

app = workflow.compile()
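
For reference, here is a minimal sketch of how this compiled graph could be invoked with the config schema above; the "qa_model" and "system_prompt" values below are placeholders, not values from my project:

from langchain_core.messages import HumanMessage

# Hypothetical invocation; config values are passed under the
# "configurable" key, matching the fields declared in GraphConfig.
result = app.invoke(
    {"messages": [HumanMessage(content="Hi, bro!")]},
    config={
        "configurable": {
            "qa_model": "openai",
            "system_prompt": "default",
            "temperature": 0.5,
            "using_summary_in_memory": True,
        }
    },
)
print(result["messages"][-1].content)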

Error Message and Stack Trace (if applicable)

delete_messages = [RemoveMessage(id=m.id) for m in state["messages"][:-2]]
AttributeError: 'dict' object has no attribute 'id'

Description

I developed a simple chatbot system with a memory summarization feature, but when I test it in LangGraph Studio, I see that the Human Message created through the Studio UI doesn't get an ID. I think one should be generated randomly and automatically.

Then, when the memory summarization runs, the RemoveMessage fails because the Human Message doesn't have an associated ID.
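
As a temporary workaround, here is a hedged sketch of a defensive version of the failing line, assuming the Studio-created human messages arrive as plain dicts without an id:

from langchain_core.messages import RemoveMessage

# Only emit RemoveMessage for entries that actually carry an id;
# plain dicts coming from the Studio UI are skipped instead of crashing.
delete_messages = [
    RemoveMessage(id=m.id)
    for m in state["messages"][:-2]
    if getattr(m, "id", None) is not None
]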

Video sample:

https://github.com/user-attachments/assets/bc1c730e-7732-4865-83bd-62492560826a

Here is the state info (note that the human messages below have no "id" field, while the AI messages do):


{
  "values": {
    "messages": [
      {
        "type": "human",
        "content": "Hi, bro!"
      },
      {
        "content": "Hey! How’s it going?",
        "additional_kwargs": {},
        "response_metadata": {
          "finish_reason": "stop",
          "model_name": "chatgpt-4o-latest",
          "system_fingerprint": "fp_61e551c4c2"
        },
        "type": "ai",
        "name": null,
        "id": "run-b424b05f-35a8-44ee-a248-963205798645",
        "example": false,
        "tool_calls": [],
        "invalid_tool_calls": [],
        "usage_metadata": null
      },
      {
        "type": "human",
        "content": "How are you?"
      },
      {
        "content": "I'm just a program, but thanks for asking! How about you?",
        "additional_kwargs": {},
        "response_metadata": {
          "finish_reason": "stop",
          "model_name": "chatgpt-4o-latest",
          "system_fingerprint": "fp_7668e88a74"
        },
        "type": "ai",
        "name": null,
        "id": "run-e28083ac-fe19-49d4-9aeb-23208bec0662",
        "example": false,
        "tool_calls": [],
        "invalid_tool_calls": [],
        "usage_metadata": null
      },
      {
        "type": "human",
        "content": "great"
      },
      {
        "content": "Awesome to hear! What's on your mind today?",
        "additional_kwargs": {},
        "response_metadata": {
          "finish_reason": "stop",
          "model_name": "chatgpt-4o-latest",
          "system_fingerprint": "fp_61e551c4c2"
        },
        "type": "ai",
        "name": null,
        "id": "run-38f86316-f003-445c-91d4-bb6add7d53f6",
        "example": false,
        "tool_calls": [],
        "invalid_tool_calls": [],
        "usage_metadata": null
      }
    ]
  },
  "next": [
    "summarizing_memory"
  ],
  "tasks": [
    {
      "id": "3fa1fcb4-765b-1806-3580-9999beccf13a",
      "name": "summarizing_memory",
      "path": [
        "__pregel_pull",
        "summarizing_memory"
      ],
      "error": "AttributeError(\"'dict' object has no attribute 'id'\")",
      "interrupts": [],
      "state": null
    }
  ],
  "metadata": {
    "step": 7,
    "run_id": "1ef79cc3-7c1d-687b-84da-a966c4b35902",
    "source": "loop",
    "writes": {
      "llm": {
        "messages": [
          {
            "id": "run-38f86316-f003-445c-91d4-bb6add7d53f6",
            "name": null,
            "type": "ai",
            "content": "Awesome to hear! What's on your mind today?",
            "example": false,
            "tool_calls": [],
            "usage_metadata": null,
            "additional_kwargs": {},
            "response_metadata": {
              "model_name": "chatgpt-4o-latest",
              "finish_reason": "stop",
              "system_fingerprint": "fp_61e551c4c2"
            },
            "invalid_tool_calls": []
          }
        ]
      }
    },
    "parents": {},
    "user_id": "",
    "graph_id": "agent",
    "thread_id": "4f1bcf71-7fd9-4df6-bc41-8af9367a8866",
    "thread_ts": "1ef79cbb-cf4f-65f3-8004-e6d3a55f5603",
    "created_by": "system",
    "run_attempt": 1,
    "assistant_id": "fe096781-5601-53d2-b2f6-0d3403f7e9ca",
    "using_summary_in_memory": true
  },
  "created_at": "2024-09-23T16:52:29.787736+00:00",
  "checkpoint_id": "1ef79cc3-85ff-67df-8007-707c3006afa3",
  "parent_checkpoint_id": "1ef79cc3-7d4f-6703-8006-61d7888b21a8"
}

System Info

langchain_community
python-dotenv
langchain_openai
langchain_core
langchain_google_vertexai
langchain
streamlit
langchain-google-genai
langchain-anthropic
langchain-groq
transformers
langgraph
langchain-aws

hinthornw commented 2 months ago

Hi there! What does your State schema look like? If you're using add_messages, that reducer will typically ensure your message has an ID.

I agree it makes sense to generate an ID for messages originating from the studio. cc @dqbd
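
For illustration, a minimal sketch of the State schema rewritten to use the add_messages reducer instead of operator.add:

from typing import Annotated, List

from typing_extensions import TypedDict
from langchain_core.messages import AnyMessage
from langgraph.graph.message import add_messages

class State(TypedDict):
    # add_messages merges by message ID and assigns a fresh UUID to any
    # incoming message that has none, unlike operator.add.
    messages: Annotated[List[AnyMessage], add_messages]
    summary: str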

Nachoeigu commented 2 months ago

> Hi there! What does your State schema look like? If you're using add_messages, that reducer will typically ensure your message has an ID.
>
> I agree it makes sense to generate an ID for messages originating from the studio. cc @dqbd

Yes, here's more context:

class GraphConfig(BaseModel):
    """
    Initial configuration to trigger the AI system.

    Attributes:
    - qa_model: Select the model for the LLM. Options include 'openai', 'google', 'meta', or 'amazon'.
    - system_prompt: Select the prompt of your conversation.
    - temperature: Select the temperature for the model. Options range from 0 to 1.
    - using_summary_in_memory: If you want to summarize previous messages, place True. Otherwise, False.
    """
    qa_model: Literal[*AVAILABLE_MODELS]
    system_prompt: Literal[*CUSTOM_PROMPTS.keys()]
    temperature: float = Field(ge=0, le=1)
    using_summary_in_memory: bool = False

    @validator("temperature")
    def check_temperature(cls, temperature: float):
        if temperature < 0.0 or temperature > 1.0:
            raise ValueError("Temperature should be between 0 and 1")

        return temperature

class State(TypedDict):
    messages: Annotated[List[AnyMessage], operator.add]
    summary: str

class GraphInput(TypedDict):
    """
    The initial message that starts the AI system
    """
    messages: Annotated[List[AnyMessage], operator.add]

class GraphOutput(TypedDict):
    """
    The output of the AI System
    """
    messages: List[AnyMessage]

workflow = StateGraph(State, 
                      input = GraphInput,
                      output = GraphOutput,
                      config_schema = GraphConfig)