microsoft / autogen

A programming framework for agentic AI 🤖
https://microsoft.github.io/autogen/
Creative Commons Attribution 4.0 International
34.68k stars 5.01k forks source link

AgentChat pause, resume, and reset #4088

Closed — ekzhu closed this issue 2 weeks ago

ekzhu commented 2 weeks ago

Resolves #3859

import asyncio
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.task import MaxMessageTermination
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_ext.models import OpenAIChatCompletionClient

async def main() -> None:
    """Demonstrate run, resume (after resetting the termination condition), and full reset."""
    model_client = OpenAIChatCompletionClient(model="gpt-4o")

    first_assistant = AssistantAgent("Assistant1", model_client=model_client)
    second_assistant = AssistantAgent("Assistant2", model_client=model_client)
    termination = MaxMessageTermination(3)
    team = RoundRobinGroupChat(
        [first_assistant, second_assistant], termination_condition=termination
    )

    # Initial run: the termination condition pauses the team after 3 messages.
    async for message in team.run_stream(task="Count from 1 to 10, respond one at a time."):
        print(message)

    # Reset only the termination condition, then resume the conversation where it left off.
    await termination.reset()
    async for message in team.run_stream():
        print(message)

    # Reset the whole team and run the same task again from a clean slate.
    await team.reset()
    async for message in team.run_stream(task="Count from 1 to 10, respond one at a time."):
        print(message)

# Guard the entry point so importing this module does not kick off a network-bound run.
if __name__ == "__main__":
    asyncio.run(main())
source='user' models_usage=None content='Count from 1 to 10, respond one at a time.'
source='Assistant1' models_usage=RequestUsage(prompt_tokens=53, completion_tokens=1) content='1'
source='Assistant2' models_usage=RequestUsage(prompt_tokens=61, completion_tokens=1) content='2'
TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Count from 1 to 10, respond one at a time.'), TextMessage(source='Assistant1', models_usage=RequestUsage(prompt_tokens=53, completion_tokens=1), content='1'), TextMessage(source='Assistant2', models_usage=RequestUsage(prompt_tokens=61, completion_tokens=1), content='2')], stop_reason='Maximum number of messages 3 reached, current message count: 3')
source='Assistant1' models_usage=RequestUsage(prompt_tokens=69, completion_tokens=1) content='3'
source='Assistant2' models_usage=RequestUsage(prompt_tokens=77, completion_tokens=1) content='4'
source='Assistant1' models_usage=RequestUsage(prompt_tokens=85, completion_tokens=1) content='5'
TaskResult(messages=[TextMessage(source='Assistant1', models_usage=RequestUsage(prompt_tokens=69, completion_tokens=1), content='3'), TextMessage(source='Assistant2', models_usage=RequestUsage(prompt_tokens=77, completion_tokens=1), content='4'), TextMessage(source='Assistant1', models_usage=RequestUsage(prompt_tokens=85, completion_tokens=1), content='5')], stop_reason='Maximum number of messages 3 reached, current message count: 3')
source='user' models_usage=None content='Count from 1 to 10, respond one at a time.'
source='Assistant1' models_usage=RequestUsage(prompt_tokens=53, completion_tokens=1) content='1'
source='Assistant2' models_usage=RequestUsage(prompt_tokens=61, completion_tokens=1) content='2'
TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Count from 1 to 10, respond one at a time.'), TextMessage(source='Assistant1', models_usage=RequestUsage(prompt_tokens=53, completion_tokens=1), content='1'), TextMessage(source='Assistant2', models_usage=RequestUsage(prompt_tokens=61, completion_tokens=1), content='2')], stop_reason='Maximum number of messages 3 reached, current message count: 3')