Avaiga / taipy

Turns Data and AI algorithms into production-ready web applications in no time.
https://www.taipy.io
Apache License 2.0

[🐛 BUG] Update content behaves differently with debug mode on/off #1814

Open noobHappylife opened 1 week ago

noobHappylife commented 1 week ago

What went wrong? 🤔

I'm working on an LLM chatbot example and using update_content to update a partial while streaming the response from the LLM. It works, but only smoothly in debug mode: with debug mode turned off, the updates become "chunky" (see the attached video).

Env: Taipy installed from source at commit 2f33ab1e3cdbc2f91553fe16ff60ea8eeab73422, on Ubuntu Server 20.04 (also tested on Windows 10).

P.S. I'm not using the chat control because I couldn't get streaming responses to work with it.
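
For context, the repro below boils down to this pattern: a long callback streams chunks from the model and pushes each one back to the GUI thread, which refreshes the conv partial (throttled to every 5th chunk in the full code). A distilled sketch, omitting the throttle (conv, create_conv, and the streamed response object come from the full code below):

from taipy.gui import State, invoke_callback

def update_state(state: State, resp: str):
    # Append the streamed chunk and re-render the conversation partial.
    state.messages[-1]["content"] += resp
    state.conv.update_content(state, create_conv(state))

def stream_message(gui, state_id, response):
    # Background thread: hand each chunk back to the GUI state.
    for chunk in response:
        invoke_callback(gui, state_id, update_state, [chunk.choices[0].delta.content])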

Expected Behavior

No response

Steps to Reproduce Issue

Here is the sample code:

import os
import base64
from dotenv import load_dotenv
import openai

from taipy.gui import State, Gui, invoke_callback, get_state_id, invoke_long_callback, notify
import taipy.gui.builder as tgb

from PIL import Image

load_dotenv()

def on_init(state):
    state.conv.update_content(state, "")
    state.messages_dict = {}
    state.messages = [
        {
            "role": "assistant",
            "style": "assistant_message", 
            "content": "Hi, how can I help you today?",
        },
    ]
    state.gpt_messages = []
    state.model_host = ""
    state.model_port = ""
    state.model_name = ""
    state.vlm_models = ["model1", "model2", "custom"]
    state.selected_model = "model1"
    new_conv = create_conv(state)
    state.conv.update_content(state, new_conv)
    state.latest_response = 0
    # Host and port are still empty at this point; send_message creates a
    # fresh client with the selected model's host/port before each request.
    state.client = openai.Client(base_url=f"http://{state.model_host}:{state.model_port}/v1", api_key="null")

def update_state(state: State, resp: str):
    # Append the streamed chunk; the partial is only re-rendered every
    # 5th chunk (latest_response acts as a simple counter-based throttle).
    state.messages[-1]["content"] += resp
    if state.latest_response > 4:
        state.conv.update_content(state, create_conv(state))
        state.latest_response = 0
    else:
        state.latest_response += 1

def stream_message(gui, state_id, client, messages, model_name):
    # Runs in a background thread via invoke_long_callback; each streamed
    # chunk is handed back to the GUI state through invoke_callback.
    print(f"Stream Message: {state_id}")
    response = client.chat.completions.create(
        messages=messages,
        model=model_name,
        stream=True,
    )

    for chunk in response:
        resp = chunk.choices[0].delta.content
        if resp is None:
            break

        invoke_callback(
            gui,
            state_id,
            update_state,
            [resp],
        )

def get_status(state: State, status: bool):
    # Status callback for invoke_long_callback: status is True on success.
    if status:
        print("Done")
        state.latest_response = 0
        state.conv.update_content(state, create_conv(state))
        state.gpt_messages.append({
            "role": "assistant",
            "content": [{"type": "text", "text": state.messages[-1]["content"]}],
        })

        # notify(state, "success", "Heavy set function finished!")
    else:
        print("Something went wrong")
        notify(state, "error", "Something went wrong")

def create_conv(state):
    # Rebuild the conversation partial page from the full message history.
    messages_dict = {}
    with tgb.Page() as conversation:
        for i, message in enumerate(state.messages):
#            text = message["content"].replace("<br>", "").replace('"', "'")
            text = message["content"]
            messages_dict[f"message_{i}"] = text
            tgb.text(
                "{messages_dict['" + f"message_{i}" + "'] if messages_dict else ''}",
                class_name=f"message_base {message['style']}",
                mode="md",
                id=f"message_id_{i}",
            )
            tgb.text("", mode="pre")
        # tgb.text(
        #     "{latest_response}",
        #     class_name="message_base assistant_message",
        #     mode="md",
        # )
    state.messages_dict = messages_dict
    return conversation

def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")

def create_gpt_conv(state):
    messages = []
    if state.system_prompt != "":
        _m = {
            "role": "system",
            "content": [{"type": "text", "text": f"{state.system_prompt}"}],
        }
        messages.append(_m)

    if state.query_image_path != "":
        base64_image = encode_image(state.query_image_path)
        _m = {
            "role": "user",
            "content": [
                {"type": "text", "text": f"{state.query_message}"},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
                },
            ],
        }
        for existing_message in state.gpt_messages:
            image_exists = len([x for x in existing_message["content"] if x["type"] == "image_url"]) > 0
            if image_exists:
                existing_message["content"].pop(1)
            messages.append(existing_message)
    else:
        _m = {
            "role": "user",
            "content": [{"type": "text", "text": f"{state.query_message}"}],
        }
        messages.extend(state.gpt_messages)
    messages.append(_m)

    state.gpt_messages = messages

    return messages

def send_message(state):
    client = openai.Client(base_url=f"http://{state.model_host}:{state.model_port}/v1", api_key="null")
    messages = create_gpt_conv(state)
    if state.query_image_path == "":
        state.messages.append(
            {
                "role": "user",
                "style": "user_message",
                "content": state.query_message,
            }
        )
    else:
        state.messages.append(
            {
                "role": "user",
                "style": "user_message",
                "content": f"{state.query_message}\n![user_image]({state.query_image_thumbnail_path})",
            }
        )
    # state.conv.update_content(state, create_conv(state))
    state.messages.append(
        {
            "role": "assistant",
            "style": "assistant_message",
            "content": "",
        }
    )
    invoke_long_callback(
        state=state,
        user_function=stream_message,
        user_function_args=[gui, get_state_id(state), client, messages, state.model_name],
        user_status_function=get_status,
        user_status_function_args=[]
    )
    # notify(state, "info", "Sending message...")
    state.query_message = ""
    state.query_image_path = ""
#    state.image_uploaded = False

def upload_image(state):
    try:
        state.image_uploaded = True
        global index
        # Open the original image
        original_image = Image.open(state.query_image_path)

        # Save the original image
        original_image.save(f"images/original_example_{index}.png")

        state.query_image_path = f"images/original_example_{index}.png"
        state.latest_image_path = f"images/original_example_{index}.png"
        # Resize the image to create a thumbnail
        thumbnail_image = original_image.copy()
        thumbnail_image.thumbnail((300, 300))

        # Save the thumbnail
        thumbnail_image.save(f"images/thumbnail_example_{index}.png")

        # Update the state to point to the thumbnail
        state.query_image_thumbnail_path = f"images/thumbnail_example_{index}.png"

        # Increment the index for the next image
        index = index + 1
    except Exception as e:
        state.image_uploaded = False
        notify(
            state,
            "error",
            f"An error occurred: {str(e)}",
        )

def reset_chat(state):
    state.messages = []
    state.gpt_messages = []
    state.query_message = ""
    state.query_image_path = ""
    state.latest_image_path = ""
    state.image_uploaded = False
    state.query_image_thumbnail_path = ""
    state.latest_response = 0
    state.conv.update_content(state, create_conv(state))
    state.selected_model = "model1"
    on_init(state)

def vlm_model_config(name):
    config = {
        "model1": {
            "model_host": "placeholder",
            "model_port": "placeholder",
            "model_name": "placeholder",
        },
        "model2": {
            "model_host": "placeholder",
            "model_port": "placeholder",
            "model_name": "placeholder",
        },
        "custom": {
            "model_host": "",
            "model_port": "",
            "model_name": "custom",
        }
    }
    return config.get(name)

def enlarge_image(state):
    with tgb.Page() as bigimage:
        tgb.image(
            "{state.latest_image_path}",
            width="800px"
        )
    state.bigimage.update_content(state, bigimage)
    state.show_bigimage_dialog = True

def close_image(state, id: str, payload: dict):
    state.show_bigimage_dialog = False

def update_model_info(state):
    config = vlm_model_config(state.selected_model)
    state.model_host = config["model_host"]
    state.model_port = config["model_port"]
    state.model_name = config["model_name"]

if __name__ == "__main__":
    index = 0
    query_image_path = ""
    latest_image_path = ""
    query_image_thumbnail_path = ""
    query_message = ""
    messages = []
    gpt_messages = []
    messages_dict = {}
    model_host = ""
    model_port = ""
    model_name = ""
    system_prompt = ""
    latest_response = 0
    show_bigimage_dialog = False
    image_uploaded = False
    vlm_models = ["model1", "model2", "custom"]
    selected_model = "model1"
    client = openai.Client(api_key="")

    with tgb.Page() as page:
        with tgb.layout(columns="300px 1"):
            with tgb.part(class_name="sidebar"):
                tgb.text("## VLM ChatBot", mode="md")
                tgb.button(
                    "New Conversation",
                    class_name="fullwidth plain",
                    id="reset_app_button",
                    on_action=reset_chat,
                )
                tgb.html("br")
                with tgb.part(render="{image_uploaded}"):
                    tgb.image(
                        content="{latest_image_path}", width="240px", class_name="image_preview", on_action=enlarge_image
                    )

            with tgb.part(class_name="p1"):
                with tgb.expandable("Model: {selected_model}", class_name="card-spacing-half-padding h4", expanded=False):
                    with tgb.layout(columns="1 1 1 1"):
                        tgb.selector(
                            value="{selected_model}",
                            lov="{vlm_models}",
                            label="Select a model",
                            on_change=update_model_info,
                            dropdown=True,
                        )
                        tgb.input(
                            "{model_host}",
                            label="Host IP",
                            change_delay=-1,
                        )
                        tgb.input(
                            "{model_port}",
                            label="Host Port",
                            change_delay=-1,
                        )
                        tgb.input(
                            "{model_name}",
                            label="Model Name",
                            change_delay=-1,
                        )
                    tgb.input(
                        "{system_prompt}",
                        label="System Prompt",
                        change_delay=-1,
                        multiline=True,
                        class_name="fullwidth",
                    )
                with tgb.part(height="600px", class_name="card card_chat"):
                    tgb.part(partial="{conv}")

                with tgb.part("card mt1"):
                    tgb.input(
                        "{query_message}",
                        on_action=send_message,
                        change_delay=-1,
                        label="Write your message:",
                        class_name="fullwidth",
                        multiline=True,
                        lines_shown=3
                    )
                    tgb.file_selector(
                        content="{query_image_path}",
                        on_action=upload_image,
                        extensions=".jpg,.jpeg,.png",
                        label="Upload an image",
                    )
                    # tgb.text("Max file size: 1MB")
            tgb.dialog(
                open="{show_bigimage_dialog}",
#                title="Stop on-going vLLM serving",
#                labels=["Stop"],
                on_action=close_image,
                partial="{bigimage}",
            )

    gui = Gui(page)
    conv = gui.add_partial("")
    bigimage = gui.add_partial("")
    gui.run(
        title="🤖VLM ChatBot", 
        dark_mode=True, 
        margin="0px", 
        host="0.0.0.0", 
        port=34545,
    )
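
Note that the script above never passes a debug flag explicitly. Taipy's Gui.run accepts a debug parameter, so the two behaviors can be compared by toggling it (same settings as above):

gui.run(
    title="🤖VLM ChatBot",
    dark_mode=True,
    margin="0px",
    host="0.0.0.0",
    port=34545,
    debug=True,  # flip to False to reproduce the "chunky" streaming
)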

Solution Proposed

No response

Screenshots

Runtime Environment

No response

Browsers

No response

OS

No response

Version of Taipy

No response

Additional Context

No response

Acceptance Criteria

Code of Conduct

FlorianJacta commented 5 days ago

@AlexandreSajus Could you check this issue?

AlexandreSajus commented 5 days ago

> @AlexandreSajus Could you check this issue?

EDIT: My bad, I had always thought you said the code works when debug was off. I don't really know how I could help here. Maybe R&D has an idea of what could be causing this.

We already discussed this on Discord. I think this is expected behavior: debug mode has to consume performance somewhere (R&D should know more), which slows down any real-time application. I'm not sure this is an issue.
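
If the per-chunk refresh cadence turns out to be the culprit, one mitigation worth trying (a sketch against the repro above, not a confirmed fix; REFRESH_INTERVAL and the last_refresh bookkeeping are additions) is to throttle update_content by elapsed time rather than by chunk count, so perceived smoothness no longer depends on how quickly update requests are processed:

import time

REFRESH_INTERVAL = 0.1  # seconds between partial refreshes; tune as needed

def update_state(state: State, resp: str):
    state.messages[-1]["content"] += resp
    now = time.monotonic()
    # last_refresh replaces the chunk counter latest_response; like the other
    # state defaults, it must be declared as a module-level variable.
    if now - state.last_refresh >= REFRESH_INTERVAL:
        state.conv.update_content(state, create_conv(state))
        state.last_refresh = now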

KunjShah95 commented 1 day ago

Kindly allow me to help you solve this bug.

FlorianJacta commented 1 day ago

@KunjShah95 You are already assigned to another issue. For Hacktoberfest, we assign only one issue at a time. Please submit a PR on the other issue first, or remove your assignment.

Thank you.

KunjShah95 commented 1 day ago

I want to work on this issue, as I have removed my previous issue.