Closed: sinomoe closed this 5 months ago
To resolve the issue of history prompts appearing in the wrong order in multi-round ReAct agent chat mode in Dify version 0.6.10, you need to ensure that the history messages are appended in the correct order. The `AgentHistoryPromptTransform` class in `api/core/prompt/agent_history_prompt_transform.py` is responsible for handling the history prompts.

Here is the relevant code snippet from `api/core/prompt/agent_history_prompt_transform.py`:
```python
class AgentHistoryPromptTransform(PromptTransform):
    """
    History Prompt Transform for Agent App
    """
    def __init__(self,
                 model_config: ModelConfigWithCredentialsEntity,
                 prompt_messages: list[PromptMessage],
                 history_messages: list[PromptMessage],
                 memory: Optional[TokenBufferMemory] = None,
                 ):
        self.model_config = model_config
        self.prompt_messages = prompt_messages
        self.history_messages = history_messages
        self.memory = memory

    def get_prompt(self) -> list[PromptMessage]:
        prompt_messages = []
        num_system = 0
        # System messages are pinned at the head of the prompt.
        for prompt_message in self.history_messages:
            if isinstance(prompt_message, SystemPromptMessage):
                prompt_messages.append(prompt_message)
                num_system += 1

        if not self.memory:
            return prompt_messages

        max_token_limit = self._calculate_rest_token(self.prompt_messages, self.model_config)

        model_type_instance = self.model_config.provider_model_bundle.model_type_instance
        model_type_instance = cast(LargeLanguageModel, model_type_instance)

        curr_message_tokens = model_type_instance.get_num_tokens(
            self.memory.model_instance.model,
            self.memory.model_instance.credentials,
            self.history_messages
        )
        # If the full history fits in the remaining token budget, use it as-is.
        if curr_message_tokens <= max_token_limit:
            return self.history_messages

        # Otherwise walk the history newest-first, dropping the oldest rounds
        # once the budget is exceeded.
        num_prompt = 0
        for prompt_message in self.history_messages[::-1]:
            if isinstance(prompt_message, SystemPromptMessage):
                continue
            prompt_messages.append(prompt_message)
            num_prompt += 1
            # Re-check the budget only at round boundaries (user messages).
            if isinstance(prompt_message, UserPromptMessage):
                curr_message_tokens = model_type_instance.get_num_tokens(
                    self.memory.model_instance.model,
                    self.memory.model_instance.credentials,
                    prompt_messages
                )
                if curr_message_tokens > max_token_limit:
                    # Drop the partial round that pushed us over the limit.
                    prompt_messages = prompt_messages[:-num_prompt]
                    break
                num_prompt = 0

        # Non-system messages were collected newest-first; reverse them back
        # into chronological order before returning.
        message_prompts = prompt_messages[num_system:]
        message_prompts.reverse()
        prompt_messages = prompt_messages[:num_system]
        prompt_messages.extend(message_prompts)
        return prompt_messages
```
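To see why the final reverse matters, here is a minimal, self-contained sketch of the same newest-first collection pattern. `Msg`, `trim_history`, and the character-count "budget" are made up for illustration; the real method counts model tokens and only truncates at user-message boundaries:

```python
# Hypothetical illustration of get_prompt's reverse-iteration pattern:
# collect messages newest-first until a budget is exceeded, then restore
# chronological order. Not Dify API; names here are invented for the sketch.
from dataclasses import dataclass

@dataclass
class Msg:
    role: str   # "system", "user", or "assistant"
    text: str

def trim_history(history: list[Msg], budget: int) -> list[Msg]:
    # System messages are pinned to the front and never trimmed.
    kept = [m for m in history if m.role == "system"]
    num_system = len(kept)

    # Collect the newest non-system messages first...
    for m in reversed([m for m in history if m.role != "system"]):
        if sum(len(x.text) for x in kept) + len(m.text) > budget:
            break
        kept.append(m)

    # ...then flip them back so the prompt reads oldest -> newest.
    tail = kept[num_system:]
    tail.reverse()
    return kept[:num_system] + tail

history = [
    Msg("system", "You are a helpful agent."),
    Msg("user", "round 1"), Msg("assistant", "answer 1"),
    Msg("user", "round 2"), Msg("assistant", "answer 2"),
]
# Keeps the system message plus the most recent round, in chronological order:
# ['You are a helpful agent.', 'round 2', 'answer 2']
print([m.text for m in trim_history(history, budget=40)])
```

If the reversal step were skipped, the trimmed history would come back newest-first, which is exactly the kind of ordering bug this issue describes.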
Additionally, the `BaseAgentRunner` class in `api/core/agent/base_agent_runner.py` has an `organize_agent_history` method that organizes the history messages. This method retrieves the conversation's messages from the database ordered by creation time, ascending:
```python
def organize_agent_history(self, prompt_messages: list[PromptMessage]) -> list[PromptMessage]:
    """
    Organize agent history
    """
    result = []
    # check if there is a system message in the beginning of the conversation
    for prompt_message in prompt_messages:
        if isinstance(prompt_message, SystemPromptMessage):
            result.append(prompt_message)

    # Fetch all messages of this conversation, oldest first.
    messages: list[Message] = db.session.query(Message).filter(
        Message.conversation_id == self.message.conversation_id,
    ).order_by(Message.created_at.asc()).all()

    for message in messages:
        if message.id == self.message.id:
            continue

        result.append(self.organize_agent_user_prompt(message))
        agent_thoughts: list[MessageAgentThought] = message.agent_thoughts
        if agent_thoughts:
            for agent_thought in agent_thoughts:
                tools = agent_thought.tool
                if tools:
                    tools = tools.split(';')
                    tool_calls: list[AssistantPromptMessage.ToolCall] = []
                    tool_call_response: list[ToolPromptMessage] = []
                    try:
                        tool_inputs = json.loads(agent_thought.tool_input)
                    except Exception as e:
                        tool_inputs = {tool: {} for tool in tools}
                    try:
                        tool_responses = json.loads(agent_thought.observation)
                    except Exception as e:
                        tool_responses = {tool: agent_thought.observation for tool in tools}

                    for tool in tools:
                        # generate a uuid for tool call
                        tool_call_id = str(uuid.uuid4())
                        tool_calls.append(AssistantPromptMessage.ToolCall(
                            id=tool_call_id,
                            type='function',
                            function=AssistantPromptMessage.ToolCall.ToolCallFunction(
                                name=tool,
                                arguments=json.dumps(tool_inputs.get(tool, {})),
                            )
                        ))
                        tool_call_response.append(ToolPromptMessage(
                            content=tool_responses.get(tool, agent_thought.observation),
                            name=tool,
                            tool_call_id=tool_call_id,
                        ))

                    # The assistant thought (with its tool calls) must precede
                    # the tool responses it triggered.
                    result.extend([
                        AssistantPromptMessage(
                            content=agent_thought.thought,
                            tool_calls=tool_calls,
                        ),
                        *tool_call_response
                    ])
                if not tools:
                    result.append(AssistantPromptMessage(content=agent_thought.thought))
        else:
            if message.answer:
                result.append(AssistantPromptMessage(content=message.answer))

    db.session.close()

    return result
```
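When debugging ordering problems, it helps to look at just the role sequence of the organized history. The small helper below is not part of Dify; it simply maps each prompt-message class name to a role label:

```python
# Hypothetical debugging aid (not part of Dify): reduce a prompt-message list
# to its role sequence so ordering mistakes are easy to spot.
def dump_roles(prompt_messages: list) -> list[str]:
    return [
        type(m).__name__.removesuffix('PromptMessage').lower()
        for m in prompt_messages
    ]

# Example usage inside BaseAgentRunner:
#   print(dump_roles(self.organize_agent_history(prompt_messages)))
#
# For a two-round history where the first round used one tool, you would
# expect something like:
#   ['system', 'user', 'assistant', 'tool', 'assistant', 'user', 'assistant']
```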
To ensure the history messages are in the correct order, verify that both the `get_prompt` method in the `AgentHistoryPromptTransform` class and the `organize_agent_history` method in the `BaseAgentRunner` class correctly process and append the history messages. If the messages still come out in the wrong order, you may need to debug these methods to confirm they retrieve and return the messages in the correct sequence [1][2]; the database check sketched below is one quick way to start.
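If you suspect the database query itself, a quick check (sketched under the assumption that the same `Message` model and `db` session from `base_agent_runner.py` are importable, and run inside an application context) is to print the creation timestamps in the order the query returns them:

```python
# Hedged sketch: verify that messages for the conversation really come back
# oldest-first. `conversation_id` is a placeholder you supply.
messages = (
    db.session.query(Message)
    .filter(Message.conversation_id == conversation_id)
    .order_by(Message.created_at.asc())
    .all()
)
for m in messages:
    print(m.created_at, m.id)
```

If two rows share the same `created_at` timestamp, their relative order is not guaranteed by this sort alone; adding a secondary sort key such as `Message.id` would make the ordering deterministic, though whether that is the root cause here is only a hypothesis worth testing.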
To continue talking to Dosu, mention @dosu.
Self Checks
Dify version
0.6.10
Cloud or Self Hosted
Self Hosted (Source)
Steps to reproduce
multi-round chat with a ReAct mode agent
✔️ Expected Behavior
history prompts should be organized in the proper order
❌ Actual Behavior
the history prompts are in the wrong order