Chainlit / chainlit

Build Conversational AI in minutes ⚡️
https://docs.chainlit.io
Apache License 2.0

`list index out of range` after upgrade to 0.7.0 #403

Closed · anyoneai closed this issue 11 months ago

anyoneai commented 11 months ago

Hi! I was using chainlit==0.6.1 and langchain==0.0.281 with an Agent and one Tool without issues, but after upgrading to 0.7.0 I always get "list index out of range". I've tried changing how I call the agent, but nothing works. This is my code:

import chainlit as cl

@cl.on_chat_start
def start():
    # Instantiate the chain for that user session
    agent_exc = AgentExecutor(
        agent=agent,
        tools=tools,
        memory=memory,
        verbose=True,
        return_intermediate_steps=False,
    )
    # Store the chain in the user session
    cl.user_session.set("agent_exc", agent_exc)

@cl.on_message
async def main(message: str):
    # Retrieve the chain from the user session
    agent_exc = cl.user_session.get("agent_exc")

    # Stream the final answer through Chainlit's LangChain callback handler
    cb = cl.LangchainCallbackHandler(stream_final_answer=True)
    await cl.make_async(agent_exc.run)(message, callbacks=[cb])

I removed the code for the agent, tools, etc. to keep this short, but note: if I chat with the agent without using the tool, everything works fine. When it tries to retrieve documents through the tool, the following error happens:

backend_1  | 2023-09-19 03:33:19 - Error in LangchainCallbackHandler.on_chat_model_start callback: list index out of range
backend_1  | 2023-09-19 03:33:22 - list index out of range
backend_1  | Traceback (most recent call last):
backend_1  |   File "/usr/local/lib/python3.10/site-packages/chainlit/utils.py", line 39, in wrapper
backend_1  |     return await user_function(**params_values)
backend_1  |   File "/home/app/src/app.py", line 38, in main
backend_1  |     await cl.make_async(user_assistant.run)(message, callbacks=[cb])
backend_1  |   File "/usr/local/lib/python3.10/site-packages/asyncer/_main.py", line 358, in wrapper
backend_1  |     return await anyio.to_thread.run_sync(
backend_1  |   File "/usr/local/lib/python3.10/site-packages/anyio/to_thread.py", line 33, in run_sync
backend_1  |     return await get_asynclib().run_sync_in_worker_thread(
backend_1  |   File "/usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 877, in run_sync_in_worker_thread
backend_1  |     return await future
backend_1  |   File "/usr/local/lib/python3.10/asyncio/futures.py", line 285, in __await__
backend_1  |     yield self  # This tells Task to wait for completion.
backend_1  |   File "/usr/local/lib/python3.10/asyncio/tasks.py", line 304, in __wakeup
backend_1  |     future.result()
backend_1  |   File "/usr/local/lib/python3.10/asyncio/futures.py", line 201, in result
backend_1  |     raise self._exception.with_traceback(self._exception_tb)
backend_1  |   File "/usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 807, in run
backend_1  |     result = context.run(func, *args)
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 487, in run
backend_1  |     return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 292, in __call__
backend_1  |     raise e
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 286, in __call__
backend_1  |     self._call(inputs, run_manager=run_manager)
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/agents/agent.py", line 1122, in _call
backend_1  |     next_step_output = self._take_next_step(
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/agents/agent.py", line 977, in _take_next_step
backend_1  |     observation = tool.run(
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/tools/base.py", line 356, in run
backend_1  |     raise e
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/tools/base.py", line 328, in run
backend_1  |     self._run(*tool_args, run_manager=run_manager, **tool_kwargs)
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/tools/base.py", line 499, in _run
backend_1  |     self.func(
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/schema/retriever.py", line 208, in get_relevant_documents
backend_1  |     raise e
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/schema/retriever.py", line 201, in get_relevant_documents
backend_1  |     result = self._get_relevant_documents(
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/retrievers/self_query/base.py", line 119, in _get_relevant_documents
backend_1  |     self.llm_chain.predict_and_parse(
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/chains/llm.py", line 284, in predict_and_parse
backend_1  |     result = self.predict(callbacks=callbacks, **kwargs)
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/chains/llm.py", line 257, in predict
backend_1  |     return self(kwargs, callbacks=callbacks)[self.output_key]
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 292, in __call__
backend_1  |     raise e
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 286, in __call__
backend_1  |     self._call(inputs, run_manager=run_manager)
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/chains/llm.py", line 93, in _call
backend_1  |     response = self.generate([inputs], run_manager=run_manager)
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/chains/llm.py", line 103, in generate
backend_1  |     return self.llm.generate_prompt(
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/chat_models/base.py", line 414, in generate_prompt
backend_1  |     return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/chat_models/base.py", line 292, in generate
backend_1  |     run_managers = callback_manager.on_chat_model_start(
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/callbacks/manager.py", line 1199, in on_chat_model_start
backend_1  |     _handle_event(
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/callbacks/manager.py", line 403, in _handle_event
backend_1  |     raise e
backend_1  |   File "/usr/local/lib/python3.10/site-packages/langchain/callbacks/manager.py", line 379, in _handle_event
backend_1  |     getattr(handler, event_name)(*args, **kwargs)
backend_1  |   File "/usr/local/lib/python3.10/site-packages/chainlit/langchain/callbacks.py", line 410, in on_chat_model_start
backend_1  |     _on_chat_model_start(self, serialized, messages, **kwargs)
backend_1  |   File "/usr/local/lib/python3.10/site-packages/chainlit/langchain/callbacks.py", line 316, in _on_chat_model_start
backend_1  |     formatted_message = formatted_messages[
backend_1  | IndexError: list index out of range
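
From the traceback, the IndexError is raised inside Chainlit's _on_chat_model_start (chainlit/langchain/callbacks.py:316), which indexes into a list of formatted prompt messages. A minimal sketch of the failure pattern as I read it (a hypothetical reduction, not the exact Chainlit code):

# The handler re-formats the prompt messages it knows about and then indexes
# into that list. When a nested chain (here, SelfQueryRetriever's internal
# LLM chain) starts a chat model with messages the handler never formatted,
# the list is shorter than the index being accessed.
formatted_messages = []  # nothing was formatted for this nested run
formatted_message = formatted_messages[0]  # IndexError: list index out of range
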
willydouhard commented 11 months ago

Can you share the agent instantiation code? It would make this easier to reproduce.

anyoneai commented 11 months ago

Okay, I've removed the prompts because of privacy, but all the rest of the code is here:

from config import settings
from langchain.agents import AgentExecutor
from langchain.agents.agent_toolkits import create_retriever_tool
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
    AgentTokenBufferMemory,
)
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import MessagesPlaceholder
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.schema.messages import SystemMessage
from langchain.vectorstores import Chroma

llm = ChatOpenAI(
    model_name=settings.MODEL_NAME,
    temperature=settings.TEMPERATURE,
    streaming=True,
    verbose=True,
)
vectorstore = Chroma(
    collection_name=settings.CHROMA_COLLECTION,
    embedding_function=OpenAIEmbeddings(),
    persist_directory="chroma/",
)
metadata_field_info = [
    AttributeInfo(
        name="Age",
        description="The age of the person.",
        type="int or float",
    ),
    AttributeInfo(
        name="I'm_from",
        description="The country the person is from.",
        type="string",
    ),
]
document_content_description = (
    "..."
)
retriever = SelfQueryRetriever.from_llm(
    llm,
    vectorstore,
    document_content_description,
    metadata_field_info,
    verbose=True,
)
info_retriever_tool = create_retriever_tool(
    retriever,
    "search_db",
    (
        "..."
    ),
)

tools = [info_retriever_tool]

memory_key = "history"
memory = AgentTokenBufferMemory(
    memory_key=memory_key,
    llm=llm,
    max_token_limit=settings.CHAT_MEMMORY_MAX_TOKENS,
)

system_message = SystemMessage(content=("...."))
prompt = OpenAIFunctionsAgent.create_prompt(
    system_message=system_message,
    extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)

agent_exc = AgentExecutor(
    agent=agent,
    tools=tools,
    memory=memory,
    verbose=True,
    return_intermediate_steps=False,
)

It looks like SelfQueryRetriever doesn't support async, but it was working fine with the previous Chainlit version...
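
For what it's worth, here is a quick way to check whether a retriever class ships its own async path in this LangChain version (my assumption: retrievers that don't override _aget_relevant_documents fall back to the BaseRetriever default):

from langchain.schema.retriever import BaseRetriever

def has_native_async(retriever: BaseRetriever) -> bool:
    # A retriever only has a real async implementation if its class
    # overrides the base _aget_relevant_documents method.
    return (
        type(retriever)._aget_relevant_documents
        is not BaseRetriever._aget_relevant_documents
    )

print(has_native_async(retriever))  # prints False for this SelfQueryRetriever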

willydouhard commented 11 months ago

Thank you, I slightly changed the example to reproduce:

from langchain.agents import AgentExecutor
from langchain.agents.agent_toolkits import create_retriever_tool
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
    AgentTokenBufferMemory,
)
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import MessagesPlaceholder
from langchain.schema.messages import SystemMessage
from langchain.document_loaders import TextLoader
from langchain.vectorstores import FAISS
from langchain.text_splitter import CharacterTextSplitter

import chainlit as cl

llm = ChatOpenAI(
    streaming=True,
)

loader = TextLoader("./state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(texts, embeddings)

retriever = db.as_retriever()

tool = create_retriever_tool(
    retriever,
    "search_state_of_union",
    "Searches and returns documents regarding the state-of-the-union.",
)
tools = [tool]

memory_key = "history"
memory = AgentTokenBufferMemory(
    memory_key=memory_key,
    llm=llm,
    max_token_limit=4000,
)

system_message = SystemMessage(
    content=(
        "Do your best to answer the questions. "
        "Feel free to use any tools available to look up "
        "relevant information, only if neccessary"
    )
)
prompt = OpenAIFunctionsAgent.create_prompt(
    system_message=system_message,
    extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)

@cl.on_chat_start
def start():
    # Instantiate the chain for that user session
    agent_exc = AgentExecutor(
        agent=agent,
        tools=tools,
        memory=memory,
        return_intermediate_steps=True,
    )
    # Store the chain in the user session
    cl.user_session.set("agent_exc", agent_exc)

@cl.on_message
async def main(message: str):
    # Retrieve the chain from the user session
    agent_exc = cl.user_session.get("agent_exc")

    cb = cl.LangchainCallbackHandler(stream_final_answer=True)
    await cl.make_async(agent_exc)(message, callbacks=[cb])

[Screenshot from 2023-09-19 18:45 showing the example running successfully]

This works well on my machine using langchain 0.0.295 and chainlit 0.7.0.

anyoneai commented 11 months ago

Could you please try using SelfQueryRetriever? I think the root issue is there, but you are using a different retriever.

willydouhard commented 11 months ago

Yes, I was able to reproduce with SelfQueryRetriever. Opened #406 to fix it. Until it gets released, you can manually replace this file in your local installation!
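
If you're unsure where that file lives in your environment, you can print the installed module's path (a small locating helper, not part of Chainlit's API):

# Prints the on-disk location of Chainlit's LangChain callbacks module,
# i.e. the file the traceback above points at.
import chainlit.langchain.callbacks as callbacks_module

print(callbacks_module.__file__)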

willydouhard commented 11 months ago

This should be fixed in the 0.7.1 release.
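
Once you've upgraded, you can double-check that the installed version picks up the fix:

from importlib.metadata import version

print(version("chainlit"))  # should print 0.7.1 or newer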