Can you share the agent instantiation code? Would be easier to reproduce
Okay, I've removed the prompts because of privacy, but all the code is here:
from config import settings
from langchain.agents import AgentExecutor
from langchain.agents.agent_toolkits import create_retriever_tool
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
    AgentTokenBufferMemory,
)
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import MessagesPlaceholder
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.schema.messages import SystemMessage
from langchain.vectorstores import Chroma

# Streaming chat model used by both the agent and the self-query retriever
llm = ChatOpenAI(
    model_name=settings.MODEL_NAME,
    temperature=settings.TEMPERATURE,
    streaming=True,
    verbose=True,
)

# Persisted Chroma collection holding the documents to search
vectorstore = Chroma(
    collection_name=settings.CHROMA_COLLECTION,
    embedding_function=OpenAIEmbeddings(),
    persist_directory="chroma/",
)

# Metadata fields the self-query retriever is allowed to filter on
metadata_field_info = [
    AttributeInfo(
        name="Age",
        description="The age of the person.",
        type="int or float",
    ),
    AttributeInfo(
        name="I'm_from",
        description="The country the person is from.",
        type="string",
    ),
]

document_content_description = "..."

retriever = SelfQueryRetriever.from_llm(
    llm,
    vectorstore,
    document_content_description,
    metadata_field_info,
    verbose=True,
)

# Expose the retriever to the agent as a tool
info_retriever_tool = create_retriever_tool(
    retriever,
    "search_db",
    "...",
)

tools = [info_retriever_tool]

# Token-limited conversation memory shared with the agent executor
memory_key = "history"
memory = AgentTokenBufferMemory(
    memory_key=memory_key,
    llm=llm,
    max_token_limit=settings.CHAT_MEMMORY_MAX_TOKENS,
)

system_message = SystemMessage(content="....")

prompt = OpenAIFunctionsAgent.create_prompt(
    system_message=system_message,
    extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
)

agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)

agent_exc = AgentExecutor(
    agent=agent,
    tools=tools,
    memory=memory,
    verbose=True,
    return_intermediate_steps=False,
)
It looks like SelfQueryRetriever doesn't support async, but with the previous chainlit version it was working fine...
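Until there is proper async support, one possible stopgap is to push the blocking retrieval onto a worker thread so the event loop stays responsive. This is only a sketch, not chainlit's or LangChain's own mechanism; it assumes nothing beyond the retriever's public get_relevant_documents(query) method, and the async hook LangChain calls internally may differ by version.

import asyncio

async def retrieve_async(retriever, query: str):
    """Run a sync-only retriever (e.g. SelfQueryRetriever) from async code."""
    loop = asyncio.get_running_loop()
    # run_in_executor delegates the blocking call to a thread executor,
    # so awaiting it does not block the event loop
    return await loop.run_in_executor(None, retriever.get_relevant_documents, query)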
Thank you, I slightly changed the example to reproduce:
from langchain.agents import AgentExecutor
from langchain.agents.agent_toolkits import create_retriever_tool
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
    AgentTokenBufferMemory,
)
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import MessagesPlaceholder
from langchain.schema.messages import SystemMessage
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS

import chainlit as cl

llm = ChatOpenAI(
    streaming=True,
)

# Build a small FAISS index over the state-of-the-union text
loader = TextLoader("./state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(texts, embeddings)
retriever = db.as_retriever()

# Expose the retriever to the agent as a tool
tool = create_retriever_tool(
    retriever,
    "search_state_of_union",
    "Searches and returns documents regarding the state-of-the-union.",
)
tools = [tool]

memory_key = "history"
memory = AgentTokenBufferMemory(
    memory_key=memory_key,
    llm=llm,
    max_token_limit=4000,
)

system_message = SystemMessage(
    content=(
        "Do your best to answer the questions. "
        "Feel free to use any tools available to look up "
        "relevant information, only if necessary"
    )
)

prompt = OpenAIFunctionsAgent.create_prompt(
    system_message=system_message,
    extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
)

agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)


@cl.on_chat_start
def start():
    # Instantiate the chain for that user session
    agent_exc = AgentExecutor(
        agent=agent,
        tools=tools,
        memory=memory,
        return_intermediate_steps=True,
    )
    # Store the chain in the user session
    cl.user_session.set("agent_exc", agent_exc)


@cl.on_message
async def main(message: str):
    # Retrieve the chain from the user session
    agent_exc = cl.user_session.get("agent_exc")
    cb = cl.LangchainCallbackHandler(stream_final_answer=True)
    # Run the (sync) executor in a thread so the event loop is not blocked
    await cl.make_async(agent_exc)(message, callbacks=[cb])
This works well on my machine using langchain 0.0.295 and chainlit 0.7.0.
Could you please try using SelfQueryRetriever? I think the root issue is there, but you are using a different retriever.
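As a rough guide, here is a minimal sketch of that swap on top of the repro above. It assumes a Chroma store in place of FAISS (FAISS has no self-query translator at this version) and an illustrative "source" metadata field that TextLoader sets on each chunk; self-query also needs the lark package installed.

from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.vectorstores import Chroma

# Index the same chunks into Chroma so a self-query translator is available
db = Chroma.from_documents(texts, embeddings)

# Illustrative metadata field; TextLoader only sets "source" on each chunk
metadata_field_info = [
    AttributeInfo(
        name="source",
        description="The file the chunk was loaded from.",
        type="string",
    ),
]

retriever = SelfQueryRetriever.from_llm(
    llm,
    db,
    "Excerpts from the state-of-the-union address.",
    metadata_field_info,
    verbose=True,
)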
Yes, I was able to reproduce with SelfQueryRetriever. Opened #406 to fix it. Until it gets released, you can manually replace this file in your local installation!
Should be fixed with the 0.7.1 version
Hi! I was using chainlit==0.6.1 and langchain==0.0.281 with an Agent and one Tool without issues, but after I upgraded to 0.7.0 I always get "list index out of range". I've tried changing how I call the agent, but nothing works. This is my code (I removed the code for the agent, tools, etc. to make it shorter); it uses:
OpenAIFunctionsAgent
SelfQueryRetriever
Chroma
ChatOpenAI (with gpt-4)
If I chat with the agent without using the tool, it works fine; when it tries to retrieve documents using the tool, the following error happens: