Chainlit / chainlit

Build Conversational AI in minutes ⚡️
https://docs.chainlit.io
Apache License 2.0

0.7.301 KeyError: 'message' #502

geoHeil closed this issue 1 year ago

geoHeil commented 1 year ago

Fails for me with this error:

File "/opt/conda/lib/python3.11/site-packages/chainlit/socket.py", line 174, in process_message
2023-10-25T05:47:03.730904154Z     message = await context.emitter.process_user_message(payload)
2023-10-25T05:47:03.730907655Z               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
2023-10-25T05:47:03.730911055Z   File "/opt/conda/lib/python3.11/site-packages/chainlit/emitter.py", line 153, in process_user_message
2023-10-25T05:47:03.730914555Z     message_dict = payload["message"]
2023-10-25T05:47:03.730923955Z                    ~~~~~~~^^^^^^^^^^^
2023-10-25T05:47:03.730927755Z KeyError: 'message'
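
The failing line indexes the socket payload directly, so any frame that arrives without a "message" key blows up. A toy reproduction (illustrative only, not Chainlit's actual code):

# Hypothetical socket frame that lacks a "message" key
payload = {"id": "abc"}
try:
    message_dict = payload["message"]  # same lookup as emitter.py line 153
except KeyError as err:
    print("KeyError:", err)  # -> KeyError: 'message'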
willydouhard commented 1 year ago

Hello, this is fixed by this PR: https://github.com/Chainlit/chainlit/pull/501

willydouhard commented 1 year ago

The fix has been released!
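Upgrading to the latest release should pick it up:

pip install --upgrade chainlit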

ggnicolau commented 11 months ago

Hi. I can run my app with Chainlit up to version 0.7.2. On any later version I keep getting KeyError: 'message'; I've tried all the recent releases and none of them works.

2023-11-10 16:09:13 - Your app is available at http://localhost:8000/
gio: http://localhost:8000/: Operation not supported
GPU availability: False
2023-11-10 16:09:16 - Load pretrained SentenceTransformer: BAAI/bge-base-en
2023-11-10 16:09:22 - 'message'
Traceback (most recent call last):
  File "/home/ggnicolau/miniconda3/envs/langchain/lib/python3.11/site-packages/chainlit/socket.py", line 174, in process_message
    message = await context.emitter.process_user_message(payload)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ggnicolau/miniconda3/envs/langchain/lib/python3.11/site-packages/chainlit/emitter.py", line 153, in process_user_message
    message_dict = payload["message"]
                   ~~~~~~~^^^^^^^^^^^
KeyError: 'message'

In case it helps, here is my code:

# Imports inferred from the code below (they were not in the original snippet);
# the langchain paths follow the pre-0.1 layout current at the time.
import torch
import chainlit as cl
from chainlit import on_chat_start, on_message
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.memory import ConversationKGMemory
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain.vectorstores import Qdrant
from qdrant_client import QdrantClient

# Constants and CustomPrompts are project-local modules (not shown here).

@on_chat_start
def init():
    # Azure OpenAI chat model, streaming tokens to stdout as they arrive
    llm = AzureChatOpenAI(
        deployment_name=Constants.AZURE_OPENAI_DEPLOYMENT_NAME,
        model_name=Constants.AZURE_OPENAI_MODEL_NAME,
        openai_api_base=Constants.AZURE_OPENAI_DEPLOYMENT_ENDPOINT,
        openai_api_version=Constants.AZURE_OPENAI_DEPLOYMENT_VERSION,
        openai_api_key=Constants.AZURE_OPENAI_API_KEY,
        openai_api_type=Constants.AZURE_OPEN_API_TYPE,
        temperature=Constants.TEMPERATURE,
        streaming=True, 
        callbacks=[StreamingStdOutCallbackHandler()],
    )

    print("GPU availability:", torch.cuda.is_available())
    embeddings = HuggingFaceEmbeddings(
        model_name = "BAAI/bge-base-en", 
        model_kwargs = {"device": "cuda" if torch.cuda.is_available() else "cpu"},
        encode_kwargs = {"normalize_embeddings": True} 
    )

    # connect to the vector database
    client = QdrantClient(
        host=Constants.QDRANT_HOSTNAME,
        port=Constants.QDRANT_PORT,
    )

    doc_store = Qdrant(
        client=client, collection_name="plano_diretor", embeddings=embeddings
    )

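    # Retrieve up to 5 chunks that clear a 0.5 similarity-score threshold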
    retriever = doc_store.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"score_threshold": 0.5, "k": 5},
    ) 

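    # Chat prompt: custom citation system message followed by the user question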
    messages = [
        SystemMessagePromptTemplate.from_template(CustomPrompts.THIRD_CITATION_TEMPLATE)
    ]
    messages.append(HumanMessagePromptTemplate.from_template("{question}"))
    smart_prompt = ChatPromptTemplate.from_messages(messages)

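    # Condenses the follow-up question + chat history into a standalone question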
    question_generator = LLMChain(
        llm=llm,
        prompt=CONDENSE_QUESTION_PROMPT,  
        verbose=True,
    )

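    # "stuff" QA chain that answers from the retrieved documents and cites sources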
    doc_chain = load_qa_with_sources_chain(
        llm=llm,
        chain_type="stuff",
        prompt=smart_prompt,
        verbose=True,
    )

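    # Knowledge-graph memory over the conversation, keyed on question/answer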
    memory = ConversationKGMemory( 
        llm=llm,
        memory_key="chat_history",
        return_messages=True,
        input_key="question",
        output_key="answer",
        max_token_limit=3000,
        k=5,
    )

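    # Wire retriever, question condenser, QA chain and memory together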
    conversational_chain = ConversationalRetrievalChain(
        retriever=retriever,
        question_generator=question_generator,
        combine_docs_chain=doc_chain,
        memory=memory,
        rephrase_question=False,
        return_source_documents=True,
        verbose=True,
    )

    # Set chain as a user session variable
    cl.user_session.set("conversation_chain", conversational_chain)

@on_message
async def main(message: str):  # newer Chainlit releases pass a cl.Message here; see the note below
    chat_history = []

    # Read the chain back from the user session
    chain = cl.user_session.get("conversation_chain")
    # The chain call blocks; cl.make_async runs it in a worker thread so the
    # asyncio event loop stays responsive
    res = await cl.make_async(chain)({"question": message, "chat_history": chat_history})

    sources = [doc.metadata.get("source") for doc in res["source_documents"]]

    # Send the answer and the text elements to the UI
    await cl.Message(
        content=f"ANSWER: {res['answer']}, \n\nSOURCES: {set(sources)}"
    ).send()
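
One more thing worth ruling out for this version range (an assumption on my part, not something confirmed in this thread): recent 0.7.x releases pass a cl.Message object to the on_message callback instead of a plain str, so a handler written against the old signature has to read the text from message.content. A minimal sketch of the updated handler, assuming that API:

@on_message
async def main(message: cl.Message):  # cl.Message instead of str in newer releases
    chain = cl.user_session.get("conversation_chain")
    # message.content carries the raw text the user typed
    res = await cl.make_async(chain)({"question": message.content, "chat_history": []})
    await cl.Message(content=res["answer"]).send()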