Start by creating a prompt template named "LC Agent" in Literal, with the following messages:
system You are a helpful assistant
assistant {{chat_history}}
user {{input}}
assistant {{agent_scratchpad}}
Run the following:
# Set env vars:
# OPENAI_API_KEY
# TAVILY_API_KEY
# LITERAL_API_KEY
# LITERAL_API_URL
import os
import getpass  # NOTE(review): imported but unused in this snippet — TODO confirm whether a later step prompts for keys

from literalai import LiteralClient

from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.runnables.config import RunnableConfig
from langchain_openai import ChatOpenAI

# Chat model and the tools the agent may call.
model = ChatOpenAI(model="gpt-4o")
search = TavilySearchResults(max_results=2)
tools = [search]

# Literal AI client: fetch the "LC Agent" prompt created in the Literal UI
# and convert it into a LangChain chat prompt template.
lai_client = LiteralClient(api_key=os.environ["LITERAL_API_KEY"], url=os.environ["LITERAL_API_URL"])
lai_prompt = lai_client.api.get_prompt(name="LC Agent")
prompt = lai_prompt.to_langchain_chat_prompt_template()

# Build a tool-calling agent around the prompt and wrap it in an executor.
agent = create_tool_calling_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)

# Reset any inherited Literal context, then attach the Literal callback so
# this invocation is traced; `run_name` labels the trace in the dashboard.
lai_client.reset_context()
cb = lai_client.langchain_callback()

agent_executor.invoke({
    "chat_history": [
        # Intermediary messages can also be given as tuples, e.g.
        # ("human", "hi! my name is bob") / ("ai", "Hello Bob! ...").
        HumanMessage(content="hi! my name is bob"),
        AIMessage(content="Hello Bob! How can I assist you today?"),
    ],
    "input": "whats the weather in sf?"
}, config=RunnableConfig(callbacks=[cb], run_name="Weather SF"))
To test, make sure the "LC Agent" prompt template exists in Literal with the messages shown above:
system: You are a helpful assistant
assistant: {{chat_history}}
user: {{input}}
assistant: {{agent_scratchpad}}
Then run the script above and check the resulting "Weather SF" trace in Literal.