DAGWorks-Inc / burr

Build applications that make decisions (chatbots, agents, simulations, etc...). Monitor, trace, persist, and execute on your own infrastructure.
https://burr.dagworks.io
BSD 3-Clause Clear License

Add `state.extend` #363

Closed elijahbenizzy closed 1 month ago

elijahbenizzy commented 1 month ago

Is your feature request related to a problem? Please describe.

Appending a single item is easy; extending with multiple items currently requires either setting the whole list or calling `append` in a loop.

Describe the solution you'd like

state = State({"a": [1, 2, 3]})    # initialize
state = state.extend(a=[4, 5, 6])  # update state

{"a" : [1,2,3,4,5,6]}

Describe alternatives you've considered

Just setting the list, or calling `append` in a loop (sketched below).
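A rough sketch of what those two workarounds look like today (a minimal illustration; variable names follow the example above):

# workaround 1: set the whole list via update
state = state.update(a=state["a"] + [4, 5, 6])

# workaround 2: append items one at a time in a loop
for item in [4, 5, 6]:
    state = state.append(a=item)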

Additional context

From Discord.

hadi-nayebi commented 1 month ago

To add more: I work with actions that emit one or more chat items to the chat history. In the decorator-function syntax this is easy to handle; I simply update the state within the body of the function and can append as many items as I want to the same container in the state machine, or chain `append` calls in the return statement:

state = state.append(chat_history=chat_item1)
if cond:
    state = state.append(chat_history=chat_item2)

return state.append(
    chat_history=chat_item3
).append(
    chat_history=chat_item4
)

Now, working with the class-based syntax for actions, I find this difficult because the main logic happens in the run method, where I build the result dict rather than the state object. Since I cannot repeat keys in the dict, I have to collect all the chat items in the result dict in some form and then update the state in the update method.

I can create a container with the same name as in the state machine, append all the chat items to it in the run method, and then, in the update method, iterate and do as in the example above:

# run method
result = {"chat_history": []}

result["chat_history"].append(chat_item1)
if cond:
    result["chat_history"].append(chat_item2)

# update method
for key, value in result.items():
    if key == "chat_history":
        for item in value:
            state = state.append(chat_history=item)
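With the proposed `state.extend`, the update method above could presumably shrink to a single call (a sketch, assuming the run method keeps the same result shape):

# hypothetical AIResponse.update once state.extend exists
def update(self, result: dict, state: State) -> State:
    return state.extend(chat_history=result["chat_history"])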
hadi-nayebi commented 1 month ago

Works.

This is the example I tested:

from burr.core import action, State, ApplicationBuilder
from burr.core import Action

@action(reads=[], writes=["chat_history"])
def human_input(state: State, prompt: str) -> State:
    # your code -- write what you want here!
    return state.append(chat_history=prompt)

# @action(reads=["chat_history"], writes=["response", "chat_history"])
# def ai_response(state: State) -> State:
#     # response = _llm_query.query(state["chat_history"]) # Burr doesn't care how you use LLMs!
#     return state.update(response="content").append(chat_history=["first_item", "second_item"]) # fails

class AIResponse(Action):
    def __init__(self):
        super().__init__()

    def run(self, state: State, **run_kwargs) -> dict:
        result = {"chat_history": ["first_item", "second_item"]}
        return result

    def update(self, result: dict, state: State) -> State:
        for key, value in result.items():
            if key.startswith("_"):
                continue
            elif key == "chat_history":
                state = state.extend(chat_history=value)
            else:
                raise ValueError(f"Key {key} is not valid")
        return state

    @property
    def reads(self) -> list[str]:
        return ["chat_history"]

    @property
    def writes(self) -> list[str]:
        return ["chat_history"]

app = (
    ApplicationBuilder()
    .with_actions(
        human_input=human_input, 
        ai_response=AIResponse()
    ).with_transitions(
        ("human_input", "ai_response"),
        ("ai_response", "human_input")
    ).with_state(chat_history=[])
    .with_entrypoint("human_input")
    .build()
)
*_, state = app.run(halt_after=["ai_response"], inputs={"prompt": "Who was Aaron Burr, sir?"})
print("answer:", app.state["chat_history"])

and the output is:

answer: ['Who was Aaron Burr, sir?', 'first_item', 'second_item']
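Presumably the commented-out decorator-based action at the top of this example could now use `extend` instead of the failing list-valued `append` (a sketch, not run here):

@action(reads=["chat_history"], writes=["response", "chat_history"])
def ai_response(state: State) -> State:
    # extend adds each item to chat_history instead of appending the list as a single element
    return state.update(response="content").extend(chat_history=["first_item", "second_item"])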
elijahbenizzy commented 1 month ago

Great! We're shipping in the next release, appreciate you testing it out.

elijahbenizzy commented 1 month ago

This is complete!