Run the following snippet against a Literal AI project and observe the token counts reported for the run:
import os
from literalai import LiteralClient
from langchain_anthropic import ChatAnthropic
from langchain.schema.runnable.config import RunnableConfig
from langchain.schema import StrOutputParser
from langchain.prompts import ChatPromptTemplate
Set the `ANTHROPIC_API_KEY` environment variable before running:
# Instrument the LangChain pipeline with Literal AI's callback handler so the
# run (and its token counts) is logged to the Literal AI project.
literal_client = LiteralClient(api_key="lsk-***")
cb = literal_client.langchain_callback()

# from_messages expects a sequence of (role, template) pairs. A bare list of
# strings would be parsed as TWO separate human messages ("human" and the joke
# template), so the pair must be wrapped in a tuple.
prompt = ChatPromptTemplate.from_messages(
    [("human", "Tell me a short joke about {topic}")]
)
model = ChatAnthropic(model_name="claude-3-5-sonnet-20240620")
runnable = prompt | model

# run_name labels the trace; the Literal AI callback records token usage.
res = runnable.invoke(
    {"topic": "ice cream"},
    config=RunnableConfig(callbacks=[cb], run_name="joke"),
)
Changes:

To test, run the snippet below:
from literalai import LiteralClient  # was missing in this copy but is used below
from langchain_anthropic import ChatAnthropic
from langchain.schema.runnable.config import RunnableConfig
from langchain.schema import StrOutputParser
from langchain.prompts import ChatPromptTemplate

# Set the ANTHROPIC_API_KEY environment variable before running.

# Log the run (and its token counts) to the Literal AI project via the
# LangChain callback handler.
literal_client = LiteralClient(api_key="lsk-***")
cb = literal_client.langchain_callback()

# from_messages takes (role, template) pairs; bare strings would each become a
# separate human message.
prompt = ChatPromptTemplate.from_messages(
    [("human", "Tell me a short joke about {topic}")]
)
model = ChatAnthropic(model_name="claude-3-5-sonnet-20240620")
runnable = prompt | model

res = runnable.invoke(
    {"topic": "ice cream"},
    config=RunnableConfig(callbacks=[cb], run_name="joke"),
)