microsoft / autogen

A programming framework for agentic AI 🤖
https://microsoft.github.io/autogen/
Creative Commons Attribution 4.0 International
31.59k stars 4.59k forks source link

[Issue]: AttributeError: 'RetrieveUserProxyAgent' object has no attribute 'message_generator' #3098

Open Adibahaq opened 2 months ago

Adibahaq commented 2 months ago

Describe the issue

I was running this notebook from Autogen's RAG example: def termination_msg(x): return isinstance(x, dict) and "TERMINATE" == str(x.get("content", ""))[-9:].upper() boss = autogen.UserProxyAgent( name="Boss", is_termination_msg=termination_msg, human_input_mode="NEVER", code_execution_config=False, # we don't want to execute code in this case. default_auto_reply="Reply TERMINATE if the task is done.", description="The boss who ask questions and give tasks.", )

# RAG-enabled user proxy: retrieves the FLAML/Spark doc to ground answers.
boss_aid = RetrieveUserProxyAgent(
    name="Boss_Assistant",
    is_termination_msg=termination_msg,
    human_input_mode="NEVER",
    default_auto_reply="Reply TERMINATE if the task is done.",
    max_consecutive_auto_reply=3,
    retrieve_config={
        "task": "code",
        "docs_path": "https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Examples/Integrate%20-%20Spark.md",
        "chunk_token_size": 1000,
        "model": config_list[0]["model"],
        "collection_name": "groupchat",
        "get_or_create": True,
    },
    code_execution_config=False,  # we don't want to execute code in this case.
    description="Assistant who has extra content retrieval power for solving difficult problems.",
)

# LLM-backed assistant that writes Python code for the group.
coder = AssistantAgent(
    name="Senior_Python_Engineer",
    is_termination_msg=termination_msg,
    system_message="You are a senior python engineer, you provide python code to answer questions. Reply TERMINATE in the end when everything is done.",
    llm_config=llm_config,
    description="Senior Python Engineer who can write code to solve problems and answer questions.",
)

# LLM-backed assistant acting as the product manager.
pm = autogen.AssistantAgent(
    name="Product_Manager",
    is_termination_msg=termination_msg,
    system_message="You are a product manager. Reply TERMINATE in the end when everything is done.",
    llm_config=llm_config,
    description="Product Manager who can design and plan the project.",
)

# LLM-backed assistant that reviews the produced code.
reviewer = autogen.AssistantAgent(
    name="Code_Reviewer",
    is_termination_msg=termination_msg,
    system_message="You are a code reviewer. Reply TERMINATE in the end when everything is done.",
    llm_config=llm_config,
    description="Code Reviewer who can review the code.",
)

# The task prompt every demo chat below starts from.
PROBLEM = "How to use spark for parallel training in FLAML? Give me sample code."

def _reset_agents():
    """Clear conversation state on every agent so each demo chat starts fresh."""
    for agent in (boss, boss_aid, coder, pm, reviewer):
        agent.reset()

def rag_chat():
    """Round-robin group chat initiated by the RAG-enabled boss_aid agent."""
    _reset_agents()
    groupchat = autogen.GroupChat(
        agents=[boss_aid, pm, coder, reviewer],
        messages=[],
        max_round=12,
        speaker_selection_method="round_robin",
    )
    manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

    # Start chatting with boss_aid as this is the user proxy agent.
    # NOTE(review): `message_generator` only exists on RetrieveUserProxyAgent in
    # recent pyautogen releases — confirm the installed version provides it.
    boss_aid.initiate_chat(
        manager,
        message=boss_aid.message_generator,
        problem=PROBLEM,
        n_results=3,
    )

def norag_chat():
    """Group chat without retrieval: the plain boss agent sends the raw problem."""
    _reset_agents()
    groupchat = autogen.GroupChat(
        agents=[boss, pm, coder, reviewer],
        messages=[],
        max_round=12,
        speaker_selection_method="auto",
        allow_repeat_speaker=False,
    )
    manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

    # Start chatting with the boss as this is the user proxy agent.
    boss.initiate_chat(
        manager,
        message=PROBLEM,
    )

def call_rag_chat():
    """Group chat where retrieval is exposed to the LLM agents as a callable
    function instead of being the chat initiator."""
    _reset_agents()

    # In this case, we will have multiple user proxy agents and we don't initiate
    # the chat with RAG user proxy agent. In order to use RAG user proxy agent, we
    # need to wrap RAG agents in a function and call it from other agents.
    def retrieve_content(
        message: Annotated[
            str,
            "Refined message which keeps the original meaning and can be used to retrieve content for code generation and question answering.",
        ],
        n_results: Annotated[int, "number of results"] = 3,
    ) -> str:
        boss_aid.n_results = n_results  # Set the number of results to be retrieved.
        # Check if we need to update the context.
        update_context_case1, update_context_case2 = boss_aid._check_update_context(message)
        if (update_context_case1 or update_context_case2) and boss_aid.update_context:
            boss_aid.problem = message if not hasattr(boss_aid, "problem") else boss_aid.problem
            _, ret_msg = boss_aid._generate_retrieve_user_reply(message)
        else:
            _context = {"problem": message, "n_results": n_results}
            ret_msg = boss_aid.message_generator(boss_aid, None, _context)
        return ret_msg if ret_msg else message

    boss_aid.human_input_mode = "NEVER"  # Disable human input for boss_aid since it only retrieves content.

    # Advertise the tool to the LLM agents that may request a retrieval.
    for caller in [pm, coder, reviewer]:
        d_retrieve_content = caller.register_for_llm(
            description="retrieve content for code generation and question answering.", api_style="function"
        )(retrieve_content)

    # Let these agents actually execute the registered function call.
    for executor in [boss, pm]:
        executor.register_for_execution()(d_retrieve_content)

    groupchat = autogen.GroupChat(
        agents=[boss, pm, coder, reviewer],
        messages=[],
        max_round=12,
        speaker_selection_method="round_robin",
        allow_repeat_speaker=False,
    )
    manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

    # Start chatting with the boss as this is the user proxy agent.
    boss.initiate_chat(
        manager,
        message=PROBLEM,
    )

This is the error I'm getting: AttributeError: 'RetrieveUserProxyAgent' object has no attribute 'message_generator'

Or any other time I try to use the RAG user proxy I get this error. Can anyone please tell me how to fix it?

Steps to reproduce

No response

Screenshots and logs

No response

Additional Information

No response

yashdhanore commented 4 weeks ago

I'm facing the same issue!