langflow-ai / langflow

Langflow is a low-code app builder for RAG and multi-agent AI applications. It’s Python-based and agnostic to any model, API, or database.
http://www.langflow.org
MIT License
31.16k stars 3.87k forks source link

ChatInput Component only processing one image #4005

Open h-arnold opened 1 week ago

h-arnold commented 1 week ago

Bug Description

I've tried uploading more than one image several different ways with no luck. The only way that works is through the playground ChatInput. Any other method seems to either fail completely or only acknowledge the first file in the list.

Reproduction

In all cases, upload the images you plan on using first and make a note of their URLs. I have tried the following methods to get Langflow to look at both files:

Passing the filenames directly

Input

{
  "input_value": "How many images do you see?",
    "files": "[\"816eec43-ef5a-4eea-bf67-00f1318ee145/2024-10-03_14-31-04_image1.png\", \"816eec43-ef5a-4eea-bf67-00f1318ee145/2024-10-03_14-31-07_image2.png\"]"
}

Output

{
  "session_id": "816eec43-ef5a-4eea-bf67-00f1318ee145",
  "outputs": [
    {
      "inputs": {
        "input_value": "How many images do you see?"
      },
      "outputs": [
        {
          "results": {
            "message": {
              "text_key": "text",
              "data": {
                "text": "I see one image.",
                "sender": "Machine",
                "sender_name": "AI",
                "session_id": "816eec43-ef5a-4eea-bf67-00f1318ee145",
                "files": [],
                "timestamp": "2024-10-03 14:49:06",
                "flow_id": "816eec43-ef5a-4eea-bf67-00f1318ee145"
              },
              "default_value": "",
              "text": "I see one image.",
              "sender": "Machine",
              "sender_name": "AI",
              "files": [],
              "session_id": "816eec43-ef5a-4eea-bf67-00f1318ee145",
              "timestamp": "2024-10-03 14:49:06",
              "flow_id": "816eec43-ef5a-4eea-bf67-00f1318ee145"
            }
          },
          "artifacts": {
            "message": "I see one image.",
            "sender": "Machine",
            "sender_name": "AI",
            "files": [],
            "type": "object"
          },
          "outputs": {
            "message": {
              "message": {
                "text": "I see one image.",
                "sender": "Machine",
                "sender_name": "AI",
                "session_id": "816eec43-ef5a-4eea-bf67-00f1318ee145",
                "files": [],
                "timestamp": "2024-10-03 14:49:06",
                "flow_id": "816eec43-ef5a-4eea-bf67-00f1318ee145"
              },
              "type": "object"
            }
          },
          "logs": {
            "message": []
          },
          "messages": [
            {
              "message": "I see one image.",
              "sender": "Machine",
              "sender_name": "AI",
              "session_id": "816eec43-ef5a-4eea-bf67-00f1318ee145",
              "component_id": "ChatOutput-LnCSG",
              "files": [],
              "type": "message"
            }
          ],
          "component_display_name": "Chat Output",
          "component_id": "ChatOutput-LnCSG",
          "used_frozen_result": false
        }
      ]
    }
  ]
}

Result: The LLM only sees the one image.

Passing the filenames via a ChatInput Tweak

Input

{
  "input_value": "How many images can you see?",
  "output_type": "chat",
  "input_type": "chat",
  "tweaks": {
    "ChatInput-0Jj5j": {
      "files": "[\"816eec43-ef5a-4eea-bf67-00f1318ee145/2024-10-03_14-31-04_image1.png\", \"816eec43-ef5a-4eea-bf67-00f1318ee145/2024-10-03_14-31-07_image2.png\"]"
    }
  }
}

Output

{
  "detail": "{\"message\":\"Error running graph: Error building Component OpenAI: \\n\\ntoo many values to unpack (expected 2)\",\"traceback\":null,\"description\":null,\"code\":null,\"suggestion\":null}"
}

Result: An error.

Trying to select more than one image at a time through the ChatInput component in the web interface

Result: Can only select one image.

Adding a 2nd file input box to the ChatInput Component

Code

from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES
from langflow.base.io.chat import ChatComponent
from langflow.inputs import BoolInput
from langflow.io import DropdownInput, FileInput, MessageTextInput, MultilineInput, Output
from langflow.memory import store_message
from langflow.schema.message import Message
from langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_USER, MESSAGE_SENDER_USER

class ChatInput(ChatComponent):
    """Chat input component that collects a text message plus optional file attachments.

    This variant declares two FileInput fields ("files" and "2nd file") and
    merges both into the outgoing Message, so every attached file is forwarded
    to downstream components instead of only the first input's files.
    """

    display_name = "Chat Input"
    description = "Get chat inputs from the Playground."
    icon = "ChatInput"
    name = "ChatInput"

    inputs = [
        MultilineInput(
            name="input_value",
            display_name="Text",
            value="",
            info="Message to be passed as input.",
        ),
        BoolInput(
            name="should_store_message",
            display_name="Store Messages",
            info="Store the message in the history.",
            value=True,
            advanced=True,
        ),
        DropdownInput(
            name="sender",
            display_name="Sender Type",
            options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],
            value=MESSAGE_SENDER_USER,
            info="Type of sender.",
            advanced=True,
        ),
        MessageTextInput(
            name="sender_name",
            display_name="Sender Name",
            info="Name of the sender.",
            value=MESSAGE_SENDER_NAME_USER,
            advanced=True,
        ),
        MessageTextInput(
            name="session_id",
            display_name="Session ID",
            info="The session ID of the chat. If empty, the current session ID parameter will be used.",
            advanced=True,
        ),
        FileInput(
            name="files",
            display_name="Files",
            file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,
            info="Files to be sent with the message.",
            advanced=True,
            is_list=True,
        ),
        # NOTE(review): "2nd file" is not a valid Python identifier (it contains
        # a space), so its value cannot be read as `self.<name>` — it must be
        # accessed via getattr(). A name like "files_2" would be safer, but is
        # kept as-is here for backward compatibility with existing tweak keys.
        FileInput(
            name="2nd file",
            display_name="Files",
            file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,
            info="Files to be sent with the message.",
            advanced=True,
            is_list=True,
        ),
    ]
    outputs = [
        Output(display_name="Message", name="message", method="message_response"),
    ]

    @staticmethod
    def _as_file_list(value) -> list:
        """Normalize a FileInput value (None, empty, single path, or list) to a list."""
        if not value:
            return []
        return list(value) if isinstance(value, (list, tuple)) else [value]

    def message_response(self) -> Message:
        """Build the chat Message, merging both file inputs, and optionally store it.

        Returns:
            Message: the message carrying the input text and ALL attached files.
        """
        # BUG FIX: the original body forwarded only `self.files`, so the second
        # FileInput ("2nd file") was declared but silently dropped — only the
        # first image ever reached downstream components. Merge both inputs.
        all_files = self._as_file_list(self.files) + self._as_file_list(
            getattr(self, "2nd file", None)
        )

        message = Message(
            text=self.input_value,
            sender=self.sender,
            sender_name=self.sender_name,
            session_id=self.session_id,
            files=all_files,
        )

        # Persist only when a session exists, the text is a plain string, and
        # the user has not disabled history storage.
        if (
            self.session_id
            and isinstance(message, Message)
            and isinstance(message.text, str)
            and self.should_store_message
        ):
            store_message(
                message,
                flow_id=self.graph.flow_id,
            )
            self.message.value = message

        self.status = message
        return message

Result: Only the first image is passed to the OpenAI Component image

What does seem to work

Passing the two images via the Playground Chat Window.

image

Here's the request payload I pulled from the developer tools that seemed to give the correct result:

{
    "inputs": {
        "input_value": "How many images do you see?"
    },
    "files": [
        "816eec43-ef5a-4eea-bf67-00f1318ee145/2024-10-03_15-12-09_image1.png",
        "816eec43-ef5a-4eea-bf67-00f1318ee145/2024-10-03_15-12-13_image2.png"
    ],
    "data": {
        "nodes": [
            {
                "id": "ChatInput-0Jj5j",
                "type": "genericNode",
                "position": {
                    "x": -479.34305776651865,
                    "y": -167.3983715008899
                },
                "data": {
                    "type": "ChatInput",
                    "node": {
                        "template": {
                            "_type": "Component",
                            "2nd file": {
                                "trace_as_metadata": true,
                                "file_path": "816eec43-ef5a-4eea-bf67-00f1318ee145/2024-10-03_14-12-21_image2.png",
                                "fileTypes": [
                                    "txt",
                                    "md",
                                    "mdx",
                                    "csv",
                                    "json",
                                    "yaml",
                                    "yml",
                                    "xml",
                                    "html",
                                    "htm",
                                    "pdf",
                                    "docx",
                                    "py",
                                    "sh",
                                    "sql",
                                    "js",
                                    "ts",
                                    "tsx",
                                    "jpg",
                                    "jpeg",
                                    "png",
                                    "bmp",
                                    "image"
                                ],
                                "list": true,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "2nd file",
                                "value": "image2.png",
                                "display_name": "Files",
                                "advanced": false,
                                "dynamic": false,
                                "info": "Files to be sent with the message.",
                                "title_case": false,
                                "type": "file",
                                "_input_type": "FileInput"
                            },
                            "files": {
                                "trace_as_metadata": true,
                                "file_path": "816eec43-ef5a-4eea-bf67-00f1318ee145/2024-10-03_14-12-16_image1.png",
                                "fileTypes": [
                                    "txt",
                                    "md",
                                    "mdx",
                                    "csv",
                                    "json",
                                    "yaml",
                                    "yml",
                                    "xml",
                                    "html",
                                    "htm",
                                    "pdf",
                                    "docx",
                                    "py",
                                    "sh",
                                    "sql",
                                    "js",
                                    "ts",
                                    "tsx",
                                    "jpg",
                                    "jpeg",
                                    "png",
                                    "bmp",
                                    "image"
                                ],
                                "list": true,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "files",
                                "value": "image1.png",
                                "display_name": "Files",
                                "advanced": false,
                                "dynamic": false,
                                "info": "Files to be sent with the message.",
                                "title_case": false,
                                "type": "file",
                                "_input_type": "FileInput",
                                "load_from_db": false
                            },
                            "code": {
                                "type": "code",
                                "required": true,
                                "placeholder": "",
                                "list": false,
                                "show": true,
                                "multiline": true,
                                "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, FileInput, MessageTextInput, MultilineInput, Output\nfrom langflow.memory import store_message\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_USER, MESSAGE_SENDER_USER\n\n\nclass ChatInput(ChatComponent):\n    display_name = \"Chat Input\"\n    description = \"Get chat inputs from the Playground.\"\n    icon = \"ChatInput\"\n    name = \"ChatInput\"\n\n    inputs = [\n        MultilineInput(\n            name=\"input_value\",\n            display_name=\"Text\",\n            value=\"\",\n            info=\"Message to be passed as input.\",\n        ),\n        BoolInput(\n            name=\"should_store_message\",\n            display_name=\"Store Messages\",\n            info=\"Store the message in the history.\",\n            value=True,\n            advanced=True,\n        ),\n        DropdownInput(\n            name=\"sender\",\n            display_name=\"Sender Type\",\n            options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n            value=MESSAGE_SENDER_USER,\n            info=\"Type of sender.\",\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"sender_name\",\n            display_name=\"Sender Name\",\n            info=\"Name of the sender.\",\n            value=MESSAGE_SENDER_NAME_USER,\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"session_id\",\n            display_name=\"Session ID\",\n            info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n            advanced=True,\n        ),\n        FileInput(\n            name=\"files\",\n            display_name=\"Files\",\n            file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n            info=\"Files to be sent with the message.\",\n            advanced=True,\n            is_list=True,\n        ),\n        FileInput(\n            name=\"2nd file\",\n            display_name=\"Files\",\n            file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n            info=\"Files to be sent with the message.\",\n            advanced=True,\n            is_list=True,\n        ),\n    ]\n    outputs = [\n        Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n    ]\n\n    def message_response(self) -> Message:\n        message = Message(\n            text=self.input_value,\n            sender=self.sender,\n            sender_name=self.sender_name,\n            session_id=self.session_id,\n            files=self.files,\n        )\n\n        if (\n            self.session_id\n            and isinstance(message, Message)\n            and isinstance(message.text, str)\n            and self.should_store_message\n        ):\n            store_message(\n                message,\n                flow_id=self.graph.flow_id,\n            )\n            self.message.value = message\n\n        self.status = message\n        return message\n",
                                "fileTypes": [],
                                "file_path": "",
                                "password": false,
                                "name": "code",
                                "advanced": true,
                                "dynamic": true,
                                "info": "",
                                "load_from_db": false,
                                "title_case": false
                            },
                            "input_value": {
                                "trace_as_input": true,
                                "multiline": true,
                                "trace_as_metadata": true,
                                "load_from_db": false,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "input_value",
                                "value": "How many images do you see?",
                                "display_name": "Text",
                                "advanced": false,
                                "input_types": [
                                    "Message"
                                ],
                                "dynamic": false,
                                "info": "Message to be passed as input.",
                                "title_case": false,
                                "type": "str",
                                "_input_type": "MultilineInput"
                            },
                            "sender": {
                                "trace_as_metadata": true,
                                "options": [
                                    "Machine",
                                    "User"
                                ],
                                "combobox": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "sender",
                                "value": "User",
                                "display_name": "Sender Type",
                                "advanced": true,
                                "dynamic": false,
                                "info": "Type of sender.",
                                "title_case": false,
                                "type": "str",
                                "_input_type": "DropdownInput"
                            },
                            "sender_name": {
                                "trace_as_input": true,
                                "trace_as_metadata": true,
                                "load_from_db": false,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "sender_name",
                                "value": "User",
                                "display_name": "Sender Name",
                                "advanced": true,
                                "input_types": [
                                    "Message"
                                ],
                                "dynamic": false,
                                "info": "Name of the sender.",
                                "title_case": false,
                                "type": "str",
                                "_input_type": "MessageTextInput"
                            },
                            "session_id": {
                                "trace_as_input": true,
                                "trace_as_metadata": true,
                                "load_from_db": false,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "session_id",
                                "value": "",
                                "display_name": "Session ID",
                                "advanced": true,
                                "input_types": [
                                    "Message"
                                ],
                                "dynamic": false,
                                "info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
                                "title_case": false,
                                "type": "str",
                                "_input_type": "MessageTextInput"
                            },
                            "should_store_message": {
                                "trace_as_metadata": true,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "should_store_message",
                                "value": true,
                                "display_name": "Store Messages",
                                "advanced": true,
                                "dynamic": false,
                                "info": "Store the message in the history.",
                                "title_case": false,
                                "type": "bool",
                                "_input_type": "BoolInput"
                            }
                        },
                        "description": "Get chat inputs from the Playground.",
                        "icon": "ChatInput",
                        "base_classes": [
                            "Message"
                        ],
                        "display_name": "Chat Input",
                        "documentation": "",
                        "custom_fields": {},
                        "output_types": [],
                        "pinned": false,
                        "conditional_paths": [],
                        "frozen": false,
                        "outputs": [
                            {
                                "types": [
                                    "Message"
                                ],
                                "selected": "Message",
                                "name": "message",
                                "display_name": "Message",
                                "method": "message_response",
                                "value": "__UNDEFINED__",
                                "cache": true
                            }
                        ],
                        "field_order": [
                            "input_value",
                            "should_store_message",
                            "sender",
                            "sender_name",
                            "session_id",
                            "files",
                            "2nd file"
                        ],
                        "beta": false,
                        "edited": true,
                        "metadata": {},
                        "lf_version": "1.0.18"
                    },
                    "id": "ChatInput-0Jj5j",
                    "description": "Get chat inputs from the Playground.",
                    "display_name": "Chat Input"
                },
                "selected": true,
                "width": 384,
                "height": 461,
                "positionAbsolute": {
                    "x": -479.34305776651865,
                    "y": -167.3983715008899
                },
                "dragging": false
            },
            {
                "id": "ChatOutput-LnCSG",
                "type": "genericNode",
                "position": {
                    "x": 879.2731119805974,
                    "y": -177.89005994568518
                },
                "data": {
                    "type": "ChatOutput",
                    "node": {
                        "template": {
                            "_type": "Component",
                            "code": {
                                "type": "code",
                                "required": true,
                                "placeholder": "",
                                "list": false,
                                "show": true,
                                "multiline": true,
                                "value": "from langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.memory import store_message\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass ChatOutput(ChatComponent):\n    display_name = \"Chat Output\"\n    description = \"Display a chat message in the Playground.\"\n    icon = \"ChatOutput\"\n    name = \"ChatOutput\"\n\n    inputs = [\n        MessageTextInput(\n            name=\"input_value\",\n            display_name=\"Text\",\n            info=\"Message to be passed as output.\",\n        ),\n        BoolInput(\n            name=\"should_store_message\",\n            display_name=\"Store Messages\",\n            info=\"Store the message in the history.\",\n            value=True,\n            advanced=True,\n        ),\n        DropdownInput(\n            name=\"sender\",\n            display_name=\"Sender Type\",\n            options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n            value=MESSAGE_SENDER_AI,\n            advanced=True,\n            info=\"Type of sender.\",\n        ),\n        MessageTextInput(\n            name=\"sender_name\",\n            display_name=\"Sender Name\",\n            info=\"Name of the sender.\",\n            value=MESSAGE_SENDER_NAME_AI,\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"session_id\",\n            display_name=\"Session ID\",\n            info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"data_template\",\n            display_name=\"Data Template\",\n            value=\"{text}\",\n            advanced=True,\n            info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n        ),\n    ]\n    outputs = [\n        Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n    ]\n\n    def message_response(self) -> Message:\n        message = Message(\n            text=self.input_value,\n            sender=self.sender,\n            sender_name=self.sender_name,\n            session_id=self.session_id,\n        )\n        if (\n            self.session_id\n            and isinstance(message, Message)\n            and isinstance(message.text, str)\n            and self.should_store_message\n        ):\n            store_message(\n                message,\n                flow_id=self.graph.flow_id,\n            )\n            self.message.value = message\n\n        self.status = message\n        return message\n",
                                "fileTypes": [],
                                "file_path": "",
                                "password": false,
                                "name": "code",
                                "advanced": true,
                                "dynamic": true,
                                "info": "",
                                "load_from_db": false,
                                "title_case": false
                            },
                            "data_template": {
                                "trace_as_input": true,
                                "trace_as_metadata": true,
                                "load_from_db": false,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "data_template",
                                "value": "{text}",
                                "display_name": "Data Template",
                                "advanced": true,
                                "input_types": [
                                    "Message"
                                ],
                                "dynamic": false,
                                "info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.",
                                "title_case": false,
                                "type": "str",
                                "_input_type": "MessageTextInput"
                            },
                            "input_value": {
                                "trace_as_input": true,
                                "trace_as_metadata": true,
                                "load_from_db": false,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "input_value",
                                "value": "",
                                "display_name": "Text",
                                "advanced": false,
                                "input_types": [
                                    "Message"
                                ],
                                "dynamic": false,
                                "info": "Message to be passed as output.",
                                "title_case": false,
                                "type": "str",
                                "_input_type": "MessageTextInput"
                            },
                            "sender": {
                                "trace_as_metadata": true,
                                "options": [
                                    "Machine",
                                    "User"
                                ],
                                "combobox": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "sender",
                                "value": "Machine",
                                "display_name": "Sender Type",
                                "advanced": true,
                                "dynamic": false,
                                "info": "Type of sender.",
                                "title_case": false,
                                "type": "str",
                                "_input_type": "DropdownInput"
                            },
                            "sender_name": {
                                "trace_as_input": true,
                                "trace_as_metadata": true,
                                "load_from_db": false,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "sender_name",
                                "value": "AI",
                                "display_name": "Sender Name",
                                "advanced": true,
                                "input_types": [
                                    "Message"
                                ],
                                "dynamic": false,
                                "info": "Name of the sender.",
                                "title_case": false,
                                "type": "str",
                                "_input_type": "MessageTextInput"
                            },
                            "session_id": {
                                "trace_as_input": true,
                                "trace_as_metadata": true,
                                "load_from_db": false,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "session_id",
                                "value": "",
                                "display_name": "Session ID",
                                "advanced": true,
                                "input_types": [
                                    "Message"
                                ],
                                "dynamic": false,
                                "info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
                                "title_case": false,
                                "type": "str",
                                "_input_type": "MessageTextInput"
                            },
                            "should_store_message": {
                                "trace_as_metadata": true,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "should_store_message",
                                "value": true,
                                "display_name": "Store Messages",
                                "advanced": true,
                                "dynamic": false,
                                "info": "Store the message in the history.",
                                "title_case": false,
                                "type": "bool",
                                "_input_type": "BoolInput"
                            }
                        },
                        "description": "Display a chat message in the Playground.",
                        "icon": "ChatOutput",
                        "base_classes": [
                            "Message"
                        ],
                        "display_name": "Chat Output",
                        "documentation": "",
                        "custom_fields": {},
                        "output_types": [],
                        "pinned": false,
                        "conditional_paths": [],
                        "frozen": false,
                        "outputs": [
                            {
                                "types": [
                                    "Message"
                                ],
                                "selected": "Message",
                                "name": "message",
                                "display_name": "Message",
                                "method": "message_response",
                                "value": "__UNDEFINED__",
                                "cache": true
                            }
                        ],
                        "field_order": [
                            "input_value",
                            "should_store_message",
                            "sender",
                            "sender_name",
                            "session_id",
                            "data_template"
                        ],
                        "beta": false,
                        "edited": false,
                        "metadata": {},
                        "lf_version": "1.0.18"
                    },
                    "id": "ChatOutput-LnCSG",
                    "description": "Display a chat message in the Playground.",
                    "display_name": "Chat Output"
                },
                "selected": false,
                "width": 384,
                "height": 289,
                "positionAbsolute": {
                    "x": 879.2731119805974,
                    "y": -177.89005994568518
                },
                "dragging": false
            },
            {
                "id": "OpenAIModel-5dcue",
                "type": "genericNode",
                "position": {
                    "x": 234.7910720404243,
                    "y": -382.99556190220443
                },
                "data": {
                    "type": "OpenAIModel",
                    "node": {
                        "template": {
                            "_type": "Component",
                            "output_parser": {
                                "trace_as_metadata": true,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "output_parser",
                                "value": "",
                                "display_name": "Output Parser",
                                "advanced": true,
                                "input_types": [
                                    "OutputParser"
                                ],
                                "dynamic": false,
                                "info": "The parser to use to parse the output of the model",
                                "title_case": false,
                                "type": "other",
                                "_input_type": "HandleInput"
                            },
                            "api_key": {
                                "load_from_db": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "api_key",
                                "value": "MY API KEY",
                                "display_name": "OpenAI API Key",
                                "advanced": false,
                                "input_types": [
                                    "Message"
                                ],
                                "dynamic": false,
                                "info": "The OpenAI API Key to use for the OpenAI model.",
                                "title_case": false,
                                "password": true,
                                "type": "str",
                                "_input_type": "SecretStrInput"
                            },
                            "code": {
                                "type": "code",
                                "required": true,
                                "placeholder": "",
                                "list": false,
                                "show": true,
                                "multiline": true,
                                "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n    BoolInput,\n    DictInput,\n    DropdownInput,\n    FloatInput,\n    IntInput,\n    SecretStrInput,\n    StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n    display_name = \"OpenAI\"\n    description = \"Generates text using OpenAI LLMs.\"\n    icon = \"OpenAI\"\n    name = \"OpenAIModel\"\n\n    inputs = LCModelComponent._base_inputs + [\n        IntInput(\n            name=\"max_tokens\",\n            display_name=\"Max Tokens\",\n            advanced=True,\n            info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n            range_spec=RangeSpec(min=0, max=128000),\n        ),\n        DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n        BoolInput(\n            name=\"json_mode\",\n            display_name=\"JSON Mode\",\n            advanced=True,\n            info=\"If True, it will output JSON regardless of passing a schema.\",\n        ),\n        DictInput(\n            name=\"output_schema\",\n            is_list=True,\n            display_name=\"Schema\",\n            advanced=True,\n            info=\"The schema for the Output of the model. \"\n            \"You must pass the word JSON in the prompt. 
\"\n            \"If left blank, JSON mode will be disabled.\",\n        ),\n        DropdownInput(\n            name=\"model_name\",\n            display_name=\"Model Name\",\n            advanced=False,\n            options=OPENAI_MODEL_NAMES,\n            value=OPENAI_MODEL_NAMES[0],\n        ),\n        StrInput(\n            name=\"openai_api_base\",\n            display_name=\"OpenAI API Base\",\n            advanced=True,\n            info=\"The base URL of the OpenAI API. \"\n            \"Defaults to https://api.openai.com/v1. \"\n            \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n        ),\n        SecretStrInput(\n            name=\"api_key\",\n            display_name=\"OpenAI API Key\",\n            info=\"The OpenAI API Key to use for the OpenAI model.\",\n            advanced=False,\n            value=\"OPENAI_API_KEY\",\n        ),\n        FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n        IntInput(\n            name=\"seed\",\n            display_name=\"Seed\",\n            info=\"The seed controls the reproducibility of the job.\",\n            advanced=True,\n            value=1,\n        ),\n        HandleInput(\n            name=\"output_parser\",\n            display_name=\"Output Parser\",\n            info=\"The parser to use to parse the output of the model\",\n            advanced=True,\n            input_types=[\"OutputParser\"],\n        ),\n    ]\n\n    def build_model(self) -> LanguageModel:  # type: ignore[type-var]\n        # self.output_schema is a list of dictionaries\n        # let's convert it to a dictionary\n        output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n        openai_api_key = self.api_key\n        temperature = self.temperature\n        model_name: str = self.model_name\n        max_tokens = self.max_tokens\n        model_kwargs = self.model_kwargs or {}\n        openai_api_base = self.openai_api_base or 
\"https://api.openai.com/v1\"\n        json_mode = bool(output_schema_dict) or self.json_mode\n        seed = self.seed\n\n        api_key = SecretStr(openai_api_key) if openai_api_key else None\n        output = ChatOpenAI(\n            max_tokens=max_tokens or None,\n            model_kwargs=model_kwargs,\n            model=model_name,\n            base_url=openai_api_base,\n            api_key=api_key,\n            temperature=temperature if temperature is not None else 0.1,\n            seed=seed,\n        )\n        if json_mode:\n            if output_schema_dict:\n                output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")  # type: ignore\n            else:\n                output = output.bind(response_format={\"type\": \"json_object\"})  # type: ignore\n\n        return output  # type: ignore\n\n    def _get_exception_message(self, e: Exception):\n        \"\"\"\n        Get a message from an OpenAI exception.\n\n        Args:\n            exception (Exception): The exception to get the message from.\n\n        Returns:\n            str: The message from the exception.\n        \"\"\"\n\n        try:\n            from openai import BadRequestError\n        except ImportError:\n            return None\n        if isinstance(e, BadRequestError):\n            message = e.body.get(\"message\")  # type: ignore\n            if message:\n                return message\n        return None\n",
                                "fileTypes": [],
                                "file_path": "",
                                "password": false,
                                "name": "code",
                                "advanced": true,
                                "dynamic": true,
                                "info": "",
                                "load_from_db": false,
                                "title_case": false
                            },
                            "input_value": {
                                "trace_as_input": true,
                                "trace_as_metadata": true,
                                "load_from_db": false,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "input_value",
                                "value": "",
                                "display_name": "Input",
                                "advanced": false,
                                "input_types": [
                                    "Message"
                                ],
                                "dynamic": false,
                                "info": "",
                                "title_case": false,
                                "type": "str",
                                "_input_type": "MessageInput"
                            },
                            "json_mode": {
                                "trace_as_metadata": true,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "json_mode",
                                "value": false,
                                "display_name": "JSON Mode",
                                "advanced": true,
                                "dynamic": false,
                                "info": "If True, it will output JSON regardless of passing a schema.",
                                "title_case": false,
                                "type": "bool",
                                "_input_type": "BoolInput"
                            },
                            "max_tokens": {
                                "trace_as_metadata": true,
                                "range_spec": {
                                    "step_type": "float",
                                    "min": 0,
                                    "max": 128000,
                                    "step": 0.1
                                },
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "max_tokens",
                                "value": "",
                                "display_name": "Max Tokens",
                                "advanced": true,
                                "dynamic": false,
                                "info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
                                "title_case": false,
                                "type": "int",
                                "_input_type": "IntInput"
                            },
                            "model_kwargs": {
                                "trace_as_input": true,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "model_kwargs",
                                "value": {},
                                "display_name": "Model Kwargs",
                                "advanced": true,
                                "dynamic": false,
                                "info": "",
                                "title_case": false,
                                "type": "dict",
                                "_input_type": "DictInput"
                            },
                            "model_name": {
                                "trace_as_metadata": true,
                                "options": [
                                    "gpt-4o-mini",
                                    "gpt-4o",
                                    "gpt-4-turbo",
                                    "gpt-4-turbo-preview",
                                    "gpt-4",
                                    "gpt-3.5-turbo",
                                    "gpt-3.5-turbo-0125"
                                ],
                                "combobox": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "model_name",
                                "value": "gpt-4o-mini",
                                "display_name": "Model Name",
                                "advanced": false,
                                "dynamic": false,
                                "info": "",
                                "title_case": false,
                                "type": "str",
                                "_input_type": "DropdownInput"
                            },
                            "openai_api_base": {
                                "trace_as_metadata": true,
                                "load_from_db": false,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "openai_api_base",
                                "value": "",
                                "display_name": "OpenAI API Base",
                                "advanced": true,
                                "dynamic": false,
                                "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.",
                                "title_case": false,
                                "type": "str",
                                "_input_type": "StrInput"
                            },
                            "output_schema": {
                                "trace_as_input": true,
                                "list": true,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "output_schema",
                                "value": {},
                                "display_name": "Schema",
                                "advanced": true,
                                "dynamic": false,
                                "info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.",
                                "title_case": false,
                                "type": "dict",
                                "_input_type": "DictInput"
                            },
                            "seed": {
                                "trace_as_metadata": true,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "seed",
                                "value": 1,
                                "display_name": "Seed",
                                "advanced": true,
                                "dynamic": false,
                                "info": "The seed controls the reproducibility of the job.",
                                "title_case": false,
                                "type": "int",
                                "_input_type": "IntInput"
                            },
                            "stream": {
                                "trace_as_metadata": true,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "stream",
                                "value": false,
                                "display_name": "Stream",
                                "advanced": true,
                                "dynamic": false,
                                "info": "Stream the response from the model. Streaming works only in Chat.",
                                "title_case": false,
                                "type": "bool",
                                "_input_type": "BoolInput"
                            },
                            "system_message": {
                                "trace_as_input": true,
                                "trace_as_metadata": true,
                                "load_from_db": false,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "system_message",
                                "value": "",
                                "display_name": "System Message",
                                "advanced": true,
                                "input_types": [
                                    "Message"
                                ],
                                "dynamic": false,
                                "info": "System message to pass to the model.",
                                "title_case": false,
                                "type": "str",
                                "_input_type": "MessageTextInput"
                            },
                            "temperature": {
                                "trace_as_metadata": true,
                                "list": false,
                                "required": false,
                                "placeholder": "",
                                "show": true,
                                "name": "temperature",
                                "value": 0.1,
                                "display_name": "Temperature",
                                "advanced": false,
                                "dynamic": false,
                                "info": "",
                                "title_case": false,
                                "type": "float",
                                "_input_type": "FloatInput"
                            }
                        },
                        "description": "Generates text using OpenAI LLMs.",
                        "icon": "OpenAI",
                        "base_classes": [
                            "LanguageModel",
                            "Message"
                        ],
                        "display_name": "OpenAI",
                        "documentation": "",
                        "custom_fields": {},
                        "output_types": [],
                        "pinned": false,
                        "conditional_paths": [],
                        "frozen": false,
                        "outputs": [
                            {
                                "types": [
                                    "Message"
                                ],
                                "selected": "Message",
                                "name": "text_output",
                                "display_name": "Text",
                                "method": "text_response",
                                "value": "__UNDEFINED__",
                                "cache": true,
                                "required_inputs": [
                                    "input_value",
                                    "stream",
                                    "system_message"
                                ]
                            },
                            {
                                "types": [
                                    "LanguageModel"
                                ],
                                "selected": "LanguageModel",
                                "name": "model_output",
                                "display_name": "Language Model",
                                "method": "build_model",
                                "value": "__UNDEFINED__",
                                "cache": true,
                                "required_inputs": [
                                    "api_key",
                                    "json_mode",
                                    "max_tokens",
                                    "model_kwargs",
                                    "model_name",
                                    "openai_api_base",
                                    "output_schema",
                                    "seed",
                                    "temperature"
                                ]
                            }
                        ],
                        "field_order": [
                            "input_value",
                            "system_message",
                            "stream",
                            "max_tokens",
                            "model_kwargs",
                            "json_mode",
                            "output_schema",
                            "model_name",
                            "openai_api_base",
                            "api_key",
                            "temperature",
                            "seed",
                            "output_parser"
                        ],
                        "beta": false,
                        "edited": false,
                        "metadata": {},
                        "lf_version": "1.0.18"
                    },
                    "id": "OpenAIModel-5dcue",
                    "description": "Generates text using OpenAI LLMs.",
                    "display_name": "OpenAI"
                },
                "selected": false,
                "width": 384,
                "height": 587,
                "dragging": false,
                "positionAbsolute": {
                    "x": 234.7910720404243,
                    "y": -382.99556190220443
                }
            }
        ],
        "edges": [
            {
                "source": "ChatInput-0Jj5j",
                "sourceHandle": "{œdataTypeœ:œChatInputœ,œidœ:œChatInput-0Jj5jœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}",
                "target": "OpenAIModel-5dcue",
                "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-5dcueœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
                "data": {
                    "targetHandle": {
                        "fieldName": "input_value",
                        "id": "OpenAIModel-5dcue",
                        "inputTypes": [
                            "Message"
                        ],
                        "type": "str"
                    },
                    "sourceHandle": {
                        "dataType": "ChatInput",
                        "id": "ChatInput-0Jj5j",
                        "name": "message",
                        "output_types": [
                            "Message"
                        ]
                    }
                },
                "id": "reactflow__edge-ChatInput-0Jj5j{œdataTypeœ:œChatInputœ,œidœ:œChatInput-0Jj5jœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-5dcue{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-5dcueœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
                "className": "",
                "animated": false
            },
            {
                "source": "OpenAIModel-5dcue",
                "sourceHandle": "{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-5dcueœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}",
                "target": "ChatOutput-LnCSG",
                "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-LnCSGœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
                "data": {
                    "targetHandle": {
                        "fieldName": "input_value",
                        "id": "ChatOutput-LnCSG",
                        "inputTypes": [
                            "Message"
                        ],
                        "type": "str"
                    },
                    "sourceHandle": {
                        "dataType": "OpenAIModel",
                        "id": "OpenAIModel-5dcue",
                        "name": "text_output",
                        "output_types": [
                            "Message"
                        ]
                    }
                },
                "id": "reactflow__edge-OpenAIModel-5dcue{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-5dcueœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-LnCSG{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-LnCSGœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
                "className": "",
                "animated": false
            }
        ]
    }
}

Expected behavior

That I should be able to pass a list of file paths and have the LLM recognise both, as seems to happen when I run the flow through the playground chat window.

Who can help?

No response

Operating System

Google Cloud Run - latest nightly docker image

Langflow Version

1.0.18 nightly as of 03/10/2024

Python Version

None

Screenshot

No response

Flow File

No response

dosubot[bot] commented 1 week ago

To address the issue of the ChatInput component only processing one image, you should ensure that the files attribute is correctly handled as a list of file paths. The FileInput is already configured to accept multiple files with is_list=True, so the problem might be in how the files are being processed or passed to the OpenAIModel component.

Here are a few steps to troubleshoot and potentially resolve the issue:

  1. Verify File Handling in message_response: Ensure that the message_response method in the ChatInput component correctly processes the files attribute as a list. The files attribute should be passed as a list to the Message object.

  2. Check JSON Input Format: When passing the filenames directly or via a tweak, ensure that the JSON input correctly represents the list of files. The files attribute should be a JSON array, not a string representation of an array. For example:

    "files": ["816eec43-ef5a-4eea-bf67-00f1318ee145/2024-10-03_14-31-04_image1.png", "816eec43-ef5a-4eea-bf67-00f1318ee145/2024-10-03_14-31-07_image2.png"]
  3. Error Handling: The error message "too many values to unpack (expected 2)" suggests there might be an issue with how the files are being unpacked or processed. Review the code where the files are being handled to ensure it correctly iterates over the list of files.

  4. Playground Chat Window: Since the playground chat window works correctly, compare the request payloads and processing logic between the playground and your implementation to identify any discrepancies.

  5. Component Configuration: Ensure that the OpenAIModel component is configured to accept and process multiple files. If the component expects a single file, it might need adjustments to handle a list of files.

By following these steps, you should be able to identify and resolve the issue with processing multiple image files in the ChatInput component.

To continue talking to Dosu, mention @dosu.

h-arnold commented 1 week ago

@dosu you can't add more than one file via the Chat Input GUI and you can only pass a list via the API by encapsulating it in a string first. There isn't any information in the swagger docs about how to correctly pass a file via the API and in any case, the request object from the playground suggests that a list is the appropriate format to pass file paths but I've not had any luck doing that.

dosubot[bot] commented 1 week ago

To correctly pass multiple file paths via the API to the ChatInput component, you should use a list format for the files input. The FileInput for files is configured with is_list=True, indicating that it accepts multiple files. Therefore, you should pass the file paths as a list, not as a string representation of a list.

Here's an example of how the JSON structure should look when making a request to the API:

{
  "input_value": "Your message here",
  "sender": "User",
  "sender_name": "User",
  "session_id": "your-session-id",
  "files": ["path/to/file1.png", "path/to/file2.png"]
}

Ensure that the files field contains a JSON array of file paths, not a string. This should align with the expected input format and allow the ChatInput component to process multiple files correctly [1].

To continue talking to Dosu, mention @dosu.

h-arnold commented 1 week ago

Thanks @dosu . I can confirm the request structure you suggest does not work for me, even after I've re-uploaded the images via the API and used the path directly from the response object for each.

dosubot[bot] commented 1 week ago

If the request structure with a JSON array for the files input is not working, there might be an issue with how the ChatInput component or the API is processing the file paths. Here are a few additional steps to consider:

  1. Check API Endpoint: Ensure that you are using the correct API endpoint and that it is configured to accept multiple file paths. Double-check the endpoint documentation or code to confirm the expected input format.

  2. Inspect Server Logs: Look at the server logs to see if there are any error messages or warnings when you send the request. This might provide clues about what is going wrong.

  3. Debugging: Add logging or debugging statements in the ChatInput component to verify that the files array is being received and processed as expected. This can help identify where the issue might be occurring.

  4. Middleware or Validation: Check if there is any middleware or validation logic that might be altering the request payload before it reaches the ChatInput component. This could be affecting how the files are processed [1].

  5. Compare with Playground: Since the playground chat window works correctly, compare the request payloads and processing logic between the playground and your API implementation to identify any discrepancies.

By following these steps, you should be able to pinpoint the issue and ensure that multiple file paths are processed correctly.

To continue talking to Dosu, mention @dosu.

joaoguilhermeS commented 2 days ago

Hi @h-arnold ,

Thank you for reporting this issue!

I've analyzed your problem and found that it's a frontend limitation when uploading multiple files. However, you can work around this by manually passing the files as a list — similar to how you've done it. Here's an example:

message = Message(
    text=self.input_value,
    sender=self.sender,
    sender_name=self.sender_name,
    session_id=self.session_id,
    files=[self.files, self.second_file, ...],
)

To properly pass the files to OpenAI, you need to build a Message object as the chat input does and how you did before. I'll report this limitation to the team for further investigation.

h-arnold commented 2 days ago

@joaoguilhermeS

Thanks so much for looking into this. :)

How would I do this via the API? I've not been able to figure out a way of structuring the request to send more than one image, or image path at a time and there doesn't seem to be any examples in the swagger or langflow docs.

joaoguilhermeS commented 1 day ago

Sure @h-arnold, I am glad to help! You can set the attributes using the tweaks flags, so you can add as many images as you configured on your Chat Input by sending a curl request similar to this one:

curl -X POST \
  "http://127.0.0.1:7860/api/v1/run/354a4875-3efa-4086-883e-cc3f2af76052?stream=false" \
  -H 'Content-Type: application/json' \
  -d '{
    "output_type": "chat",
    "input_type": "chat",
    "tweaks": {
      "ChatInput-Zqr2N": {
        "files": "img1.jpg",
        "second_file": "img3.jpeg"
      },
      "OpenAIModel-9zcSN": {
        "api_key": "OPENAI_API_KEY"
      }
    }
  }'

You can check the API button in the lower right of the canvas, where you can see more information about structuring your Flow parameters by enabling the tweaks flag (see image).

Here are the docs: https://docs.langflow.org/workspace-api

h-arnold commented 21 hours ago

@joaoguilhermeS

Thanks so much for your help! I modified the ChatInput as you suggested for a quick workaround and I've posted the result below in case anyone else needs it until the frontend gets updated.

from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES
from langflow.base.io.chat import ChatComponent
from langflow.inputs import BoolInput
from langflow.io import DropdownInput, FileInput, MessageTextInput, MultilineInput, Output
from langflow.memory import store_message
from langflow.schema.message import Message
from langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_USER, MESSAGE_SENDER_USER

class ChatInput(ChatComponent):
    """Chat input component that accepts a text message plus up to
    ``MAX_FILES`` individually uploaded files.

    Workaround for the frontend limitation where the stock ChatInput's
    single ``FileInput`` (``is_list=True``) only ever receives one file:
    instead, expose ``MAX_FILES`` separate single-file inputs
    (``file_1`` .. ``file_N``) and merge them into one list when the
    ``Message`` is built, so each file can be targeted via API tweaks.
    """

    display_name = "Chat Input"
    description = "Get chat inputs from the Playground."
    icon = "ChatInput"
    name = "ChatInput"

    # Maximum number of file input slots exposed on the component.
    # Raise this to allow more files per message.
    MAX_FILES = 3

    inputs = [
        MultilineInput(
            name="input_value",
            display_name="Text",
            value="",
            info="Message to be passed as input.",
        ),
        BoolInput(
            name="should_store_message",
            display_name="Store Messages",
            info="Store the message in the history.",
            value=True,
            advanced=True,
        ),
        DropdownInput(
            name="sender",
            display_name="Sender Type",
            options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],
            value=MESSAGE_SENDER_USER,
            info="Type of sender.",
            advanced=True,
        ),
        MessageTextInput(
            name="sender_name",
            display_name="Sender Name",
            info="Name of the sender.",
            value=MESSAGE_SENDER_NAME_USER,
            advanced=True,
        ),
        MessageTextInput(
            name="session_id",
            display_name="Session ID",
            info="The session ID of the chat. If empty, the current session ID parameter will be used.",
            advanced=True,
        ),
    ]

    # Dynamically add one single-file FileInput per slot (file_1..file_N).
    # A generator expression is used instead of a class-body for-loop so
    # the loop variable does not leak into the class namespace as an
    # attribute (a bare ``for i in ...`` here would leave ``ChatInput.i``).
    # NOTE: only the outermost iterable of the genexp can see class-scope
    # names, which is why it references MAX_FILES and nothing else does.
    inputs.extend(
        FileInput(
            name=f"file_{i}",
            display_name=f"File {i}",
            file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,
            info=f"File {i} to be sent with the message.",
            advanced=True,
            is_list=False,  # each FileInput handles a single file
        )
        for i in range(1, MAX_FILES + 1)
    )

    outputs = [
        Output(display_name="Message", name="message", method="message_response"),
    ]

    def message_response(self) -> Message:
        """Build (and optionally persist) a Message from the text input
        and all populated file slots.

        Returns:
            Message: the constructed chat message, with ``files`` set to
            the list of every non-empty ``file_i`` value.
        """
        # Gather every populated file slot; falsy values (unset slot or
        # empty path) are skipped so ``files`` holds only real paths.
        files = [
            file
            for i in range(1, self.MAX_FILES + 1)
            if (file := getattr(self, f"file_{i}", None))
        ]

        message = Message(
            text=self.input_value,
            sender=self.sender,
            sender_name=self.sender_name,
            session_id=self.session_id,
            files=files,
        )

        # Persist to history only when a session exists, the message is
        # well-formed text, and storage is enabled.
        if (
            self.session_id
            and isinstance(message, Message)
            and isinstance(message.text, str)
            and self.should_store_message
        ):
            store_message(
                message,
                flow_id=self.graph.flow_id,
            )
            self.message.value = message

        # Surface the built message in the component's status display.
        self.status = message
        return message