langchain-ai / langgraph

Build resilient language agents as graphs.
https://langchain-ai.github.io/langgraph/
MIT License

After changing chat_models from ChatOpenAI to ChatVertexAI, I get an error: 'ValueError: SystemMessage should be the first in the history.' #628

Open weatherbetter opened 3 weeks ago

weatherbetter commented 3 weeks ago

Example Code

calculate = get_math_tool(ChatVertexAI(model_name="gemini-pro"))
llm = ChatVertexAI(model_name="gemini-pro")

Error Message and Stack Trace (if applicable)

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[38], line 3
      1 example_question = "What's the temperature in SF raised to the 3rd power?"
----> 3 for task in planner.stream([HumanMessage(content=example_question)]):
      4     print(task["tool"], task["args"])

File /opt/conda/lib/python3.10/site-packages/langchain_core/runnables/base.py:2873, in RunnableSequence.stream(self, input, config, **kwargs)
   2867 def stream(
   2868     self,
   2869     input: Input,
   2870     config: Optional[RunnableConfig] = None,
   2871     **kwargs: Optional[Any],
   2872 ) -> Iterator[Output]:
-> 2873     yield from self.transform(iter([input]), config, **kwargs)

File /opt/conda/lib/python3.10/site-packages/langchain_core/runnables/base.py:2860, in RunnableSequence.transform(self, input, config, **kwargs)
   2854 def transform(
   2855     self,
   2856     input: Iterator[Input],
   2857     config: Optional[RunnableConfig] = None,
   2858     **kwargs: Optional[Any],
   2859 ) -> Iterator[Output]:
-> 2860     yield from self._transform_stream_with_config(
   2861         input,
   2862         self._transform,
   2863         patch_config(config, run_name=(config or {}).get("run_name") or self.name),
   2864         **kwargs,
   2865     )

File /opt/conda/lib/python3.10/site-packages/langchain_core/runnables/base.py:1865, in Runnable._transform_stream_with_config(self, input, transformer, config, run_type, **kwargs)
   1863 try:
   1864     while True:
-> 1865         chunk: Output = context.run(next, iterator)  # type: ignore
   1866         yield chunk
   1867         if final_output_supported:

File /opt/conda/lib/python3.10/site-packages/langchain_core/runnables/base.py:2822, in RunnableSequence._transform(self, input, run_manager, config, **kwargs)
   2819     else:
   2820         final_pipeline = step.transform(final_pipeline, config)
-> 2822 for output in final_pipeline:
   2823     yield output

File /opt/conda/lib/python3.10/site-packages/langchain_core/output_parsers/transform.py:50, in BaseTransformOutputParser.transform(self, input, config, **kwargs)
     44 def transform(
     45     self,
     46     input: Iterator[Union[str, BaseMessage]],
     47     config: Optional[RunnableConfig] = None,
     48     **kwargs: Any,
     49 ) -> Iterator[T]:
---> 50     yield from self._transform_stream_with_config(
     51         input, self._transform, config, run_type="parser"
     52     )

File /opt/conda/lib/python3.10/site-packages/langchain_core/runnables/base.py:1829, in Runnable._transform_stream_with_config(self, input, transformer, config, run_type, **kwargs)
   1827 input_for_tracing, input_for_transform = tee(input, 2)
   1828 # Start the input iterator to ensure the input runnable starts before this one
-> 1829 final_input: Optional[Input] = next(input_for_tracing, None)
   1830 final_input_supported = True
   1831 final_output: Optional[Output] = None

File /opt/conda/lib/python3.10/site-packages/langchain_core/runnables/base.py:1179, in Runnable.transform(self, input, config, **kwargs)
   1176             final = ichunk
   1178 if got_first_val:
-> 1179     yield from self.stream(final, config, **kwargs)

File /opt/conda/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:265, in BaseChatModel.stream(self, input, config, stop, **kwargs)
    258 except BaseException as e:
    259     run_manager.on_llm_error(
    260         e,
    261         response=LLMResult(
    262             generations=[[generation]] if generation else []
    263         ),
    264     )
--> 265     raise e
    266 else:
    267     run_manager.on_llm_end(LLMResult(generations=[[generation]]))

File /opt/conda/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:245, in BaseChatModel.stream(self, input, config, stop, **kwargs)
    243 generation: Optional[ChatGenerationChunk] = None
    244 try:
--> 245     for chunk in self._stream(messages, stop=stop, **kwargs):
    246         if chunk.message.id is None:
    247             chunk.message.id = f"run-{run_manager.run_id}"

File /opt/conda/lib/python3.10/site-packages/langchain_google_vertexai/chat_models.py:1011, in ChatVertexAI._stream(self, messages, stop, run_manager, **kwargs)
   1007     yield from self._stream_non_gemini(
   1008         messages, stop=stop, run_manager=run_manager, **kwargs
   1009     )
   1010     return
-> 1011 yield from self._stream_gemini(
   1012     messages=messages, stop=stop, run_manager=run_manager, **kwargs
   1013 )
   1014 return

File /opt/conda/lib/python3.10/site-packages/langchain_google_vertexai/chat_models.py:1023, in ChatVertexAI._stream_gemini(self, messages, stop, run_manager, **kwargs)
   1016 def _stream_gemini(
   1017     self,
   1018     messages: List[BaseMessage],
   (...)
   1021     **kwargs: Any,
   1022 ) -> Iterator[ChatGenerationChunk]:
-> 1023     request = self._prepare_request_gemini(messages=messages, stop=stop, **kwargs)
   1024     response_iter = _completion_with_retry(
   1025         self.prediction_client.stream_generate_content,
   1026         max_retries=self.max_retries,
   (...)
   1030         **kwargs,
   1031     )
   1032     for response_chunk in response_iter:

File /opt/conda/lib/python3.10/site-packages/langchain_google_vertexai/chat_models.py:796, in ChatVertexAI._prepare_request_gemini(self, messages, stop, stream, tools, functions, tool_config, safety_settings, **kwargs)
    785 def _prepare_request_gemini(
    786     self,
    787     messages: List[BaseMessage],
   (...)
    794     **kwargs,
    795 ) -> GenerateContentRequest:
--> 796     system_instruction, contents = _parse_chat_history_gemini(messages)
    797     formatted_tools = self._tools_gemini(tools=tools, functions=functions)
    798     tool_config = self._tool_config_gemini(tool_config=tool_config)

File /opt/conda/lib/python3.10/site-packages/langchain_google_vertexai/chat_models.py:244, in _parse_chat_history_gemini(history, project, convert_system_message_to_human)
    242 prev_ai_message = None
    243 if i != 0:
--> 244     raise ValueError("SystemMessage should be the first in the history.")
    245 if system_instruction is not None:
    246     raise ValueError(
    247         "Detected more than one SystemMessage in the list of messages."
    248         "Gemini APIs support the insertion of only one SystemMessage."
    249     )

ValueError: SystemMessage should be the first in the history.

Description

After changing chat_models from ChatOpenAI to ChatVertexAI, I get an error: 'ValueError: SystemMessage should be the first in the history.' File: LLMCompiler.ipynb

System Info

python -m langchain_core.sys_info

hinthornw commented 3 weeks ago

Hi @weatherbetter, I'm assuming you are using the multi-agent notebook for this?

For Vertex or other model providers, I'd replace the system messages with human messages with the content wrapped in XML, since they don't support the full OpenAI API.

weatherbetter commented 3 weeks ago

Hi @hinthornw, first of all, thank you for your reply.

I tried using convert_system_message_to_human, but it still doesn't work. Am I using the wrong method? What should I do?

llm = ChatVertexAI(model_name="gemini-pro", convert_system_message_to_human=True)
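
For context, a minimal sketch of where that flag does and doesn't help (hedged; the messages are illustrative, not from the notebook). Per the _parse_chat_history_gemini source quoted in the traceback above, convert_system_message_to_human only folds a SystemMessage at position 0 into the first human turn; a system message appearing anywhere later in the history still raises:

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_google_vertexai import ChatVertexAI

llm = ChatVertexAI(model_name="gemini-pro", convert_system_message_to_human=True)

# OK: the SystemMessage is first, so it is merged into the following human turn.
llm.invoke([SystemMessage(content="Be terse."), HumanMessage(content="Hello")])

# Still raises ValueError: SystemMessage should be the first in the history.
llm.invoke([HumanMessage(content="Hello"), SystemMessage(content="Be terse.")])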

hinthornw commented 3 weeks ago

I was thinking more along the lines of this:

def sanitize(messages: list):
    # Rewrite any SystemMessage as a HumanMessage so Gemini never sees a
    # system role after position 0.
    return [
        HumanMessage(content=f"<system-message>{m.content}</system-message>")
        if m.type == "system"
        else m
        for m in messages
    ]

llm = sanitize | ChatVertexAI(model_name="gemini-pro")
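
For reference, a quick usage sketch of that pipeline (hedged: the history below is illustrative; the bare sanitize function works on the left of the pipe because LangChain coerces it into a RunnableLambda):

from langchain_core.messages import HumanMessage, SystemMessage

# A history that would otherwise raise, because the SystemMessage is not first:
history = [
    HumanMessage(content="What's 2 to the 3rd power?"),
    SystemMessage(content="Answer with just the number."),
]
# sanitize runs first, so Gemini only ever sees human turns.
print(llm.invoke(history).content)
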
chocky18 commented 3 weeks ago

Same issue with my code

hinthornw commented 3 weeks ago

@chocky18 did you try a suggested fix?

weatherbetter commented 3 weeks ago

@hinthornw I tried, but it still doesn't work. The same error occurs.

chocky18 commented 3 weeks ago

import functools
import operator
import json
import os
from typing import (
    Any,
    AsyncIterator,
    Callable,
    Dict,
    Iterator,
    List,
    Literal,
    Optional,
    Sequence,
    Type,
    TypedDict,
    Union,
    cast,
    overload,
)

import google.generativeai as genai
from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage, AIMessage, FunctionMessage, ToolMessage
from langchain_google_vertexai import ChatVertexAI, create_structured_runnable

# agent_node and create_agent are helpers defined in the multi-agent notebook;
# they are not actually importable from langgraph.graph.
from langgraph.graph import END, StateGraph, agent_node, create_agent

os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "json key"
os.environ["GENAI_API_KEY"] = "API key"

class AgentState(TypedDict):
    messages: Sequence[BaseMessage]
    next: str

def setup_supervisor_chain(members: List[str], options: List[str], prompt: str):
    """Set up the supervisor chain using LangChain libraries.

    Args:
    - members (List[str]): List of members/workers.
    - options (List[str]): List of options including 'FINISH' and members.
    - prompt (str): Prompt template for the supervisor chain.

    Returns:
    - Any: Supervisor chain instance configured with the given parameters.
    """
    # Ensure GOOGLE_APPLICATION_CREDENTIALS is set
    if "GOOGLE_APPLICATION_CREDENTIALS" not in os.environ:
        raise EnvironmentError("GOOGLE_APPLICATION_CREDENTIALS environment variable is not set.")

    # llm = ChatGoogleGenerativeAI(temperature=0, model="gemini-pro", convert_system_message_to_human=True)
    # llm = ChatVertexAI(model="gemini-pro", project="agents")
    genai.configure(api_key=os.environ["GENAI_API_KEY"])
    # llm = genai.GenerativeModel(name='gemini-1.5-flash')
    llm = ChatVertexAI(
        model="gemini-1.5-flash-001",
        temperature=0,
        max_tokens=None,
        max_retries=6,
        stop=None,
        # other params...
    )

    class Route(BaseModel):
        """Select the next role."""

        next: Literal["Researcher", "Coder", "FINISH"] = Field(
            ..., description="Select the next agent to execute the pending task or message"
        )

    supervisor_chain = create_structured_runnable([Route], llm, prompt=prompt)
    return supervisor_chain

Example usage:

members = ["Researcher", "Coder"] options = ["FINISH"] + members

prompt = ChatPromptTemplate.from_messages([ ("You are a supervisor tasked with managing a conversation between the" " following workers: {members}. Given the following user request," " respond with the worker to act next. Each worker will perform a" " task and respond with their results and status. When finished," " if a message or result contains the word FINISH,respond with FINISH,again if a message or result contains the word FINISH your output must be FINISH " "Given the following query: {messages}, Make calls to the relevant function to tell us who should execute the inputed query or messages? you should only Select one of of: {options}," "Or should we FINISH? " "Tip: Make sure to answer in the correct format, and output absolutely nothing else") ]).partial(options=str(options), members=", ".join(members))

supervisor_chain = setup_supervisor_chain(members, options, prompt)

# # print("chain1",chain)
# # Assuming `state['input']` contains the input message for the supervisor chain
# input_message = state['messages']
# print("input_message",input_message)
# # Run the supervisor chain with the input message
# output = chain.invoke(input_message)
# print("output",output)
# # Update the state with the output's next step
# state['next'] = output.get('next', 'FINISH')
# return state

Define a simple supervisor chain function

Initialize the language model

llm = ChatVertexAI(model="gemini-pro",project="agents-426216")

Create the supervisor chain

supervisor_chain = (
    prompt
    | llm.bind_tools([function_def], function_call="route")  # note: function_def is not defined anywhere in this snippet
    | JsonOutputFunctionsParser()
)

def supervisor_chain(state):
    # NOTE: this placeholder shadows the structured chain created above and
    # always routes to FINISH.
    state['next'] = 'FINISH'
    return state

Define a simple _convert_to_prompt function

def _convert_to_prompt(part: str) -> Dict[str, Any]:
    return {"text": part}

Define the agents and nodes

research_agent = create_agent(llm, [tavily_tool], "You are a web researcher.")
research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")

code_agent = create_agent(
    llm,
    [python_repl_tool],
    "You may generate safe python code to analyze data and generate charts using matplotlib.",
)
code_node = functools.partial(agent_node, agent=code_agent, name="Coder")

Define the workflow

workflow = StateGraph(AgentState) workflow.add_node("Researcher", research_node) workflow.add_node("Coder", code_node) workflow.add_node("supervisor", supervisor_chain) # Use the placeholder supervisor_chain

Define members and add edges

members = ["Researcher", "Coder"] for member in members: workflow.add_edge(member, "supervisor")

Define conditional map

conditional_map = {k: k for k in members}
conditional_map["FINISH"] = END
workflow.add_conditional_edges("supervisor", lambda x: x["next"], conditional_map)

Set entry point

workflow.set_entry_point("supervisor")

Compile the workflow

graph = workflow.compile()

Define the function to convert messages

def _convert_to_parts(message: BaseMessage) -> List[Dict[str, Any]]:
    raw_content = message.content
    if isinstance(raw_content, str):
        raw_content = [raw_content]
    return [_convert_to_prompt(part) for part in raw_content]

def convert_messages_to_vertex_format(history: List[BaseMessage], convert_system_message_to_human: bool = False):
    vertex_messages: List[Dict[str, Any]] = []
    system_parts: Optional[List[Dict[str, Any]]] = None
    system_instruction = None
    prev_ai_message: Optional[AIMessage] = None

    for i, message in enumerate(history):
        if isinstance(message, SystemMessage):
            prev_ai_message = None
            if i != 0:
                raise ValueError("SystemMessage should be the first in the history.")
            if system_instruction is not None:
                raise ValueError(
                    "Detected more than one SystemMessage in the list of messages. "
                    "Gemini APIs support the insertion of only one SystemMessage."
                )
            if convert_system_message_to_human:
                system_parts = _convert_to_parts(message)
                continue
            system_instruction = {"role": "user", "parts": _convert_to_parts(message)}
        elif isinstance(message, HumanMessage):
            prev_ai_message = None
            role = "user"
            parts = _convert_to_parts(message)
            if system_parts is not None:
                if i != 1:
                    raise ValueError(
                        "System message should be immediately followed by HumanMessage"
                    )
                parts = system_parts + parts
                system_parts = None
            vertex_messages.append({"role": role, "parts": parts})
        elif isinstance(message, AIMessage):
            prev_ai_message = message
            role = "model"
            parts = _convert_to_parts(message) if message.content else []
            for tc in message.tool_calls:
                function_call = {"name": tc["name"], "args": tc["args"]}
                parts.append({"function_call": function_call})
            vertex_messages.append({"role": role, "parts": parts})
        elif isinstance(message, FunctionMessage):
            prev_ai_message = None
            role = "function"
            part = {"function_response": {"name": message.name, "response": {"content": message.content}}}
            prev_content = vertex_messages[-1]
            if prev_content and prev_content["role"] == "function":
                parts = list(prev_content["parts"])
                parts.append(part)
                vertex_messages[-1] = {"role": role, "parts": parts}
                continue
            vertex_messages.append({"role": role, "parts": [part]})
        elif isinstance(message, ToolMessage):
            role = "function"
            name = message.name or next(
                (t["name"] for t in prev_ai_message.tool_calls if t["id"] == message.tool_call_id), None
            )
            if not name:
                raise ValueError("Message name is empty and can't find corresponding tool call")
            content = _parse_tool_message_content(message.content)
            part = {"function_response": {"name": name, "response": content}}
            prev_content = vertex_messages[-1]
            if prev_content and prev_content["role"] == "function":
                parts = list(prev_content["parts"])
                parts.append(part)
                vertex_messages[-1] = {"role": role, "parts": parts}
                continue
            vertex_messages.append({"role": role, "parts": [part]})
        else:
            raise ValueError(f"Unexpected message type {type(message)} at position {i}.")
    return system_instruction, vertex_messages

def _parse_tool_message_content(content: Any) -> Dict[Any, Any]:
    if isinstance(content, list):
        parsed_content = [_parse_content(c) for c in content]
        if len(parsed_content) > 1:
            merged_content = {}
            for content_piece in parsed_content:
                for key, value in content_piece.items():
                    if key not in merged_content:
                        merged_content[key] = []
                    merged_content[key].append(value)
            return {k: "".join(v) for k, v in merged_content.items()}
        return parsed_content[0]
    return _parse_content(content)

def _parse_content(raw_content: Any) -> Dict[Any, Any]:
    if isinstance(raw_content, dict):
        return raw_content
    if isinstance(raw_content, str):
        try:
            content = json.loads(raw_content)
            if isinstance(content, dict):
                return content
        except json.JSONDecodeError:
            pass
    return {"content": raw_content}

Prepare the message sequence

initial_message = SystemMessage(content="Starting the workflow")
human_message = HumanMessage(content="Code hello world and print it to the terminal")

Convert messages

system_instruction, vertex_messages = convert_messages_to_vertex_format(
    [initial_message, human_message]
)

Debugging: Print the converted messages

print("System instruction:", system_instruction)

print("Vertex messages:", vertex_messages)

input_messages = [initial_message, human_message]

Stream the graph with the initial SystemMessage followed by other messages

try:
    for s in graph.stream(
        {"messages": input_messages},
        {"recursion_limit": 100},
    ):
        if "__end__" not in s:
            print(s)
            print("----")
except ValueError as e:
    print(f"Error: {e}")

chocky18 commented 3 weeks ago

check this, it's working

luishmq commented 2 weeks ago

check this, it's working

Hey! I tried your code, but it didn't work. Can you help me?

import os
import functools
import json
from typing import Any, Dict, List, Literal, Optional, Sequence, TypedDict

from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage, AIMessage, FunctionMessage, ToolMessage
from langchain_google_vertexai import ChatVertexAI, create_structured_runnable

class AgentState(TypedDict):
    messages: Sequence[BaseMessage]
    next: str

Function to set up the supervisor chain

def setup_supervisor_chain(members: List[str], options: List[str], prompt: str):
    llm = ChatVertexAI(
        model="gemini-1.5-pro-001",
        model_kwargs=model_kwargs,  # note: model_kwargs is not defined in this snippet
    )

    class Route(BaseModel):
        """Select the next role."""

        next: Literal["Components", "Records", "Details", "FINISH"] = Field(
            ..., description="Selecione o próximo agente para executar a tarefa."
        )

    supervisor_chain = create_structured_runnable([Route], llm, prompt=prompt)
    return supervisor_chain

def create_agent(llm, tools, system_message):
    prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),  # was ai_prompt, an undefined name
        ("human", "{input}"),
    ])
    return prompt | llm.bind_tools(tools)

def agent_node(state, agent, name):
    result = agent.invoke(state)
    if isinstance(result, ToolMessage):
        pass
    else:
        result = AIMessage(**result.dict(exclude={"type", "name"}), name=name)
    return {
        "messages": [result],
        "sender": name,
    }

from langchain_core.output_parsers import StrOutputParser
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langgraph.graph import END, StateGraph
from langgraph.prebuilt import ToolNode

members = ["Components", "Records", "Details"]

Our team supervisor is an LLM node. It just picks the next agent to process and decides when the work is completed.

options = ["FINISH"] + members

Using openai function calling can make output parsing easier for us

prompt = ChatPromptTemplate.from_messages([ ("Você é um supervisor encarregado de gerenciar uma conversa entre" "seguintes trabalhadores: {members}. Dada a seguinte solicitação do usuário," "responda com o trabalhador para agir em seguida. Cada trabalhador executará um" "tarefa e responda com seus resultados e status. Quando terminar," "se uma mensagem ou resultado contiver a palavra FINISH, responda com FINISH. Novamente, se uma mensagem ou resultado contiver a palavra FINISH, sua saída deverá ser FINISH." " Dada a seguinte consulta: {messages}, faça chamadas para a função relevante para nos informar quem deve executar a consulta ou mensagens de entrada. Você deve selecionar apenas um de: {options}," "ou devemos TERMINAR?" "Dica: certifique-se de responder no formato correto e não produza absolutamente mais nada.") ]).partial(options=str(options), members=", ".join(members))

supervisor_chain = setup_supervisor_chain(members, options, prompt)

llm = ChatVertexAI(model=model, model_kwargs=model_kwargs, safety_settings=safety_settings)  # assumes model, model_kwargs, and safety_settings are defined elsewhere

def supervisor_chain(state):
    # NOTE: this placeholder shadows the structured chain created above and
    # always routes to FINISH.
    state['next'] = 'FINISH'
    return state

def _convert_to_prompt(part: str) -> Dict[str, Any]:
    return {"text": part}

def create_and_bind_agent(llm, tools, system_message, name):
    agent = create_agent(llm, tools, system_message)
    return functools.partial(agent_node, agent=agent, name=name)

components_agent = create_and_bind_agent(
    llm,
    [list_components],
    "Você é o Agente de Componentes da uMov.me. Você deve fornecer informações sobre os componentes disponíveis no catálogo uMov.me.",
    "Components",
)
records_agent = create_and_bind_agent(
    llm,
    [list_component_records],
    "Você é o Agente de Registros da uMov.me. Você deve fornecer informações sobre os registros de cada componente disponível no catálogo uMov.me.",
    "Records",
)
details_agent = create_and_bind_agent(
    llm,
    [get_component_details],
    "Você é o Agente de Detalhes da uMov.me. Você deve fornecer informações sobre os detalhes da estrutura de cada componente disponível no catálogo uMov.me.",
    "Details",
)

tools = [get_component_details, list_components, list_component_records]

workflow = StateGraph(AgentState) workflow.add_node("Components", components_agent) workflow.add_node("Records", records_agent) workflow.add_node("Details", details_agent) workflow.add_node("supervisor", supervisor_chain)

for member in members:
    # We want our workers to ALWAYS "report back" to the supervisor when done
    workflow.add_edge(member, "supervisor")

The supervisor populates the "next" field in the graph state, which routes to a node or finishes.

conditional_map = {k: k for k in members}
conditional_map["FINISH"] = END
workflow.add_conditional_edges("supervisor", lambda x: x["next"], conditional_map)

Finally, add entrypoint

workflow.set_entry_point("supervisor")

graph = workflow.compile()

import os
import functools
import json
from typing import Any, Dict, List, Literal, Optional, Sequence, TypedDict

from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    FunctionMessage,
    HumanMessage,
    InvalidToolCall,
    SystemMessage,
    ToolCall,
    ToolCallChunk,
    ToolMessage,
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_google_vertexai import ChatVertexAI, create_structured_runnable
from langgraph.graph import END, StateGraph
from langgraph.prebuilt import ToolNode

def _convert_to_prompt(part: str) -> Dict[str, Any]:
    return {"text": part}

def _convert_to_parts(message: BaseMessage) -> List[Dict[str, Any]]:
    raw_content = message.content
    if isinstance(raw_content, str):
        raw_content = [raw_content]
    return [_convert_to_prompt(part) for part in raw_content]

def convert_messages_to_vertex_format(history: List[BaseMessage], convert_system_message_to_human: bool = False):
    vertex_messages: List[Dict[str, Any]] = []
    system_parts: Optional[List[Dict[str, Any]]] = None
    system_instruction = None
    prev_ai_message: Optional[AIMessage] = None

    for i, message in enumerate(history):
        if isinstance(message, SystemMessage):
            prev_ai_message = None
            if i != 0:
                raise ValueError("SystemMessage should be the first in the history.")
            if system_instruction is not None:
                raise ValueError(
                    "Detected more than one SystemMessage in the list of messages. "
                    "Gemini APIs support the insertion of only one SystemMessage."
                )
            if convert_system_message_to_human:
                system_parts = _convert_to_parts(message)
                continue
            system_instruction = {"role": "user", "parts": _convert_to_parts(message)}
        elif isinstance(message, HumanMessage):
            prev_ai_message = None
            role = "user"
            parts = _convert_to_parts(message)
            if system_parts is not None:
                if i != 1:
                    raise ValueError(
                        "System message should be immediately followed by HumanMessage"
                    )
                parts = system_parts + parts
                system_parts = None
            vertex_messages.append({"role": role, "parts": parts})
        elif isinstance(message, AIMessage):
            prev_ai_message = message
            role = "model"
            parts = _convert_to_parts(message) if message.content else []
            for tc in message.tool_calls:
                function_call = {"name": tc["name"], "args": tc["args"]}
                parts.append({"function_call": function_call})
            vertex_messages.append({"role": role, "parts": parts})
        elif isinstance(message, FunctionMessage):
            prev_ai_message = None
            role = "function"
            part = {"function_response": {"name": message.name, "response": {"content": message.content}}}
            prev_content = vertex_messages[-1]
            if prev_content and prev_content["role"] == "function":
                parts = list(prev_content["parts"])
                parts.append(part)
                vertex_messages[-1] = {"role": role, "parts": parts}
                continue
            vertex_messages.append({"role": role, "parts": [part]})
        elif isinstance(message, ToolMessage):
            role = "function"
            name = message.name or next(
                (t["name"] for t in prev_ai_message.tool_calls if t["id"] == message.tool_call_id), None
            )
            if not name:
                raise ValueError("Message name is empty and can't find corresponding tool call")
            content = _parse_tool_message_content(message.content)
            part = {"function_response": {"name": name, "response": content}}
            prev_content = vertex_messages[-1]
            if prev_content and prev_content["role"] == "function":
                parts = list(prev_content["parts"])
                parts.append(part)
                vertex_messages[-1] = {"role": role, "parts": parts}
                continue
            vertex_messages.append({"role": role, "parts": [part]})
        else:
            raise ValueError(f"Unexpected message type {type(message)} at position {i}.")
    return system_instruction, vertex_messages

def _parse_tool_message_content(content: Any) -> Dict[Any, Any]:
    if isinstance(content, list):
        parsed_content = [_parse_content(c) for c in content]
        if len(parsed_content) > 1:
            merged_content = {}
            for content_piece in parsed_content:
                for key, value in content_piece.items():
                    if key not in merged_content:
                        merged_content[key] = []
                    merged_content[key].append(value)
            return {k: "".join(v) for k, v in merged_content.items()}
        return parsed_content[0]
    return _parse_content(content)

def _parse_content(raw_content: Any) -> Dict[Any, Any]:
    if isinstance(raw_content, dict):
        return raw_content
    if isinstance(raw_content, str):
        try:
            content = json.loads(raw_content)
            if isinstance(content, dict):
                return content
        except json.JSONDecodeError:
            pass
    return {"content": raw_content}

initial_message = SystemMessage(content="Starting the workflow")
human_message = HumanMessage(content="Quais são os componentes do catálogo?")

Convert messages

system_instruction, vertex_messages = convert_messages_to_vertex_format(
    [initial_message, human_message]
)

Debugging: print the converted messages

print("System instruction:", system_instruction) print("Vertex messages:", vertex_messages) input_messages = [initial_message, human_message]

try:
    for s in graph.stream(
        {"messages": input_messages},
        {"recursion_limit": 100},
    ):
        if "__end__" not in s:
            print(s)
            print("----")
except ValueError as e:
    print(f"Error: {e}")

Answer: {'supervisor': {'messages': [SystemMessage(content='Starting the workflow'), HumanMessage(content='Quais são os componentes do catálogo?')], 'next': 'FINISH'}}

luishmq commented 1 week ago

@chocky18 did you try a suggested fix?

Can you help me? Please, I got the same error. I'm using ChatVertexAI.

from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_google_vertexai import ChatVertexAI, create_structured_runnable
from langchain.agents.format_scratchpad.tools import format_to_tool_messages
from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
from langchain.tools.base import StructuredTool

def create_agent(llm: ChatVertexAI, tools: list, system_prompt: str):
    # Each worker node will be given a name and some tools.
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                system_prompt,
            ),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    llm = llm.bind_tools(tools=tools)

    agent_executor = AgentExecutor(
        agent=prompt | llm | ToolsAgentOutputParser(),
        tools=[StructuredTool.from_function(tool) for tool in tools],
    )
    return agent_executor

def agent_node(state, agent, name):
    result = agent.invoke(state)
    return {"messages": [HumanMessage(content=result["output"], name=name)]}

from typing import Any, Dict, List, Literal, Optional, Sequence, TypedDict

from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langgraph.graph import END, StateGraph

class AgentState(TypedDict):
    messages: Sequence[BaseMessage]
    next: str

members = ["Components", "Records", "Details"] system_prompt = ( "Você é um supervisor encarregado de gerenciar uma conversa entre" "seguintes trabalhadores: {members}. Dada a seguinte solicitação do usuário," "responda com o trabalhador para agir em seguida. Cada trabalhador executará um" "tarefa e responda com seus resultados e status. Quando terminar," "se uma mensagem ou resultado contiver a palavra FINISH, responda com FINISH." )

Our team supervisor is an LLM node. It just picks the next agent to process and decides when the work is completed.

options = ["FINISH"] + members

Using openai function calling can make output parsing easier for us

function_def = { "name": "route", "description": "Select the next role.", "parameters": { "title": "routeSchema", "type": "object", "properties": { "next": { "title": "Next", "anyOf": [ {"enum": options}, ], } }, "required": ["next"], }, } prompt = ChatPromptTemplate.from_messages( [ ("system", system_prompt), MessagesPlaceholder(variable_name="messages"), ( "system", "Dada a conversa acima, quem deve agir em seguida?" "Ou devemos FINISH? Selecione uma de: {options}", ), ] ).partial(options=str(options), members=", ".join(members))

llm = ChatVertexAI(model_name="gemini-1.5-pro-001", convert_system_message_to_human=True)

supervisor_chain = (
    prompt
    | llm.bind(functions=[function_def], function_call="route")
    | JsonOutputFunctionsParser()
)

components_agent = create_agent(llm, [list_components], "Você é o Agente de Componentes da uMov.me. Você deve fornecer informações sobre os componentes disponíveis no catálogo uMov.me.")
records_agent = create_agent(
    llm,
    [list_component_records],
    "Você é o Agente de Registros da uMov.me. Você deve fornecer informações sobre os registros de cada componente disponível no catálogo uMov.me.",
)
details_agent = create_agent(
    llm,
    [get_component_details],
    "Você é o Agente de Detalhes da uMov.me. Você deve fornecer informações sobre os detalhes da estrutura de cada componente disponível no catálogo uMov.me.",
)

tools = [get_component_details, list_components, list_component_records]

workflow = StateGraph(AgentState)
workflow.add_node("Components", components_agent)
workflow.add_node("Records", records_agent)
workflow.add_node("Details", details_agent)
workflow.add_node("supervisor", supervisor_chain)

for member in members:
    # We want our workers to ALWAYS "report back" to the supervisor when done
    workflow.add_edge(member, "supervisor")

The supervisor populates the "next" field in the graph state, which routes to a node or finishes.

conditional_map = {k: k for k in members}
conditional_map["FINISH"] = END
workflow.add_conditional_edges("supervisor", lambda x: x["next"], conditional_map)

Finally, add entrypoint

workflow.set_entry_point("supervisor")

graph = workflow.compile()

def sanitize(messages: list):
    # Follow hinthornw's suggestion: turn system messages into tagged human
    # messages. (The original passed type="system" to HumanMessage, which both
    # defeats the purpose and is rejected by the message schema.)
    return [
        HumanMessage(content=f"<system-message>{m.content}</system-message>")
        if m.type == "system"
        else m
        for m in messages
    ]

messages = [HumanMessage(content="Quais são os componentes disponíveis no catálogo?", type="human")]

sanitized_messages = sanitize(messages)

input_dict = {"messages": sanitized_messages} # Encapsulando as mensagens em um dicionário

for s in graph.stream(input_dict): # Passando o dicionário como argumento if "end" not in s: print(s) print("----")

Error:

ValueError: SystemMessage should be the first in the history.

I really need this code as soon as possible, thanks anyway!

@hinthornw
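
(A likely trigger in the snippet above is the second ("system", ...) tuple after MessagesPlaceholder: it renders as a SystemMessage in the middle of the history, which the Gemini parser rejects. A hedged workaround sketch, reusing the names from this snippet and adapting hinthornw's suggestion, is to make that final routing instruction a human turn:)

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        (
            "human",  # was "system": a human turn avoids a mid-history system message
            "Dada a conversa acima, quem deve agir em seguida?"
            " Ou devemos FINISH? Selecione uma de: {options}",
        ),
    ]
).partial(options=str(options), members=", ".join(members))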

luishmq commented 1 week ago

@hinthornw Can you help me? Please, thanks anyway!