# %%
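# Assumes a local Ollama server is running at the default http://localhost:11434
# and that the model has been pulled beforehand, e.g. `ollama pull gemma:2b`.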
from langchain_community.llms import Ollama
llm = Ollama(model="gemma:2b")
llm.invoke("How's the weather today?")
# %%
import json
import requests
response = requests.post(
    "http://localhost:11434/api/chat",
    json={"model": "gemma:2b", "messages": [{"role": "user", "content": "How's the weather today?"}], "stream": True},
)
response.raise_for_status()
print(response)
# The streaming response returns one JSON object per line; parse and print each chunk.
for line in response.iter_lines():
    body = json.loads(line)
    print(body)
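# Minimal non-streaming variant (assumption: with "stream": False the server returns
# a single JSON object instead of line-delimited chunks).
response = requests.post(
    "http://localhost:11434/api/chat",
    json={"model": "gemma:2b", "messages": [{"role": "user", "content": "How's the weather today?"}], "stream": False},
)
response.raise_for_status()
print(response.json()["message"]["content"])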
# %%
import json
import requests
model = "gemma:2b"
def chat(messages):
    response = requests.post(
        "http://localhost:11434/api/chat",
        json={"model": model, "messages": messages, "stream": True},
    )
    response.raise_for_status()
    output = ""
    for line in response.iter_lines():
        body = json.loads(line)
        if "error" in body:
            raise Exception(body["error"])
        if body.get("done") is False:
            message = body.get("message", {})
            content = message.get("content", "")
            output += content
            # the response streams one token at a time; print each chunk as we receive it
            print(content, end="", flush=True)
        if body.get("done", False):
            message["content"] = output
            return message
messages = [{"role": "user", "content":"How is the weather today?"}]
print(chat(messages))
# %%
def chat_with_user():
    messages = []
    while True:
        user_input = input("User > ")
        if not user_input:
            break  # empty input ends the chat loop
        print()
        messages.append({"role": "user", "content": user_input})
        message = chat(messages)
        messages.append(message)
        print("\n\n")
chat_with_user()
# %%
def chat_with_user_with_prompt():
    messages = []
    messages.append({"role": "system", "content": "You are a world-class technical documentation writer."})
    while True:
        user_input = input("User > ")
        if not user_input:
            break  # empty input ends the chat loop
        print()
        messages.append({"role": "user", "content": user_input})
        message = chat(messages)
        messages.append(message)
        print("\n\n")
chat_with_user_with_prompt()
# %%
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
llm = Ollama(model="gemma:2b")
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a world-class technical documentation writer. Answer in Korean."),
    ("user", "{input}")
])
output_parser = StrOutputParser()
chain = prompt | llm
res = chain.invoke({"input": "Tell me about the monitoring roles of infrastructure and application staff for web system operations"})
print(res)
# %%
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
llm = Ollama(model="gemma:2b")
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a world-class technical documentation writer. Answer in Korean."),
    ("user", "{input}")
])
output_parser = StrOutputParser()
chain = prompt | llm | output_parser
res = chain.invoke({"input": "Tell me about the monitoring roles of infrastructure and application staff for web system operations"})
print(res)
# %%
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
llm = Ollama(model="gemma:2b")
prompt = ChatPromptTemplate.from_messages([
    ("system", "You're a good friend."),
    ("placeholder", "{chat_history}"),
    ("user", "{input}")
])
output_parser = StrOutputParser()
chain = prompt | llm | output_parser
# The "placeholder" slot expects a list of messages; pass the history and the new
# input together in a single input dict.
chat_history = []
chat_history.append(("user", "My name is Yongwan."))
res = chain.invoke({
    "input": "Do you remember my name?",
    "chat_history": chat_history,
})
print(res)
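# To continue the conversation, append the model's reply to the history before the
# next call (a minimal sketch; the follow-up question is illustrative).
chat_history.append(("user", "Do you remember my name?"))
chat_history.append(("assistant", res))
res = chain.invoke({
    "input": "What did I just ask you?",
    "chat_history": chat_history,
})
print(res)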
# %%
from langchain_community.llms import Ollama
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.schema.runnable import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain.memory import ConversationSummaryBufferMemory
from langchain_core.messages import HumanMessage, AIMessage
llm = Ollama(model="gemma:2b")
memory = ConversationSummaryBufferMemory(
    llm=llm,
    max_token_limit=500,
    memory_key="chat_history",
    return_messages=True,
)
def load_memory(inputs):
    print(inputs)
    return memory.load_memory_variables({})["chat_history"]
prompt = ChatPromptTemplate.from_messages([
    MessagesPlaceholder(variable_name="chat_history"),
    ("system", "You are a helpful AI talking to a human."),
    ("user", "{input}")
])
output_parser = StrOutputParser()
chain = RunnablePassthrough.assign(chat_history=load_memory) | prompt | llm | output_parser
def invoke_chain(question):
    result = chain.invoke({"input": question})
    # Persist the turn so the summary buffer can include it in later calls.
    memory.save_context(
        {"input": question},
        {"output": result},
    )
    print(result)
invoke_chain("My name is Yongwan.")
invoke_chain("What's my name?")
# %%
# LangChain supports many other chat models. Here, we're using Ollama
from langchain_community.chat_models import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
# supports many more optional parameters. Hover on your `ChatOllama(...)`
# class to view the latest available supported parameters
# Conversation history is passed in the prompt/invoke call, not the constructor.
llm = ChatOllama(model="gemma:2b", verbose=True)
prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
# using LangChain Expression Language (LCEL) chain syntax
# learn more about the LCEL on
# /docs/expression_language/why
chain = prompt | llm | StrOutputParser()
# for brevity, response is printed in terminal
# You can use LangServe to deploy your application for
# production
print(chain.invoke({"topic": "Space travel"}))
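# Runnable chains also expose .batch() for multiple inputs (a small sketch;
# the extra topic is illustrative).
for joke in chain.batch([{"topic": "Space travel"}, {"topic": "Databases"}]):
    print(joke)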
# %%
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain_community.llms import Ollama
from langchain.chains import ConversationChain
llm = Ollama(
    model="gemma:2b",
    temperature=0,
)
memory = ConversationBufferMemory()
conversation = ConversationChain(
    llm=llm,
    verbose=True,
    memory=memory,
)
conversation.invoke(input="Hello")
conversation.invoke(input="My name is Yongwan.")
conversation.invoke(input="Do you remember my name?")
# %%
!pip install langchain_experimental
# %%
from langchain_experimental.llms.ollama_functions import OllamaFunctions
model = OllamaFunctions(model="gemma:2b")
# %%
model = model.bind(
    functions=[
        {
            "name": "get_current_time",
            "description": "Get the current time.",
            "parameters": {
                "type": "object",
                "properties": {},
                "required": [],
            },
        },
        {
            "name": "get_current_location",
            "description": "Get the current location, e.g. latitude and longitude.",
            "parameters": {
                "type": "object",
                "properties": {},
                "required": [],
            },
        },
        {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                    },
                },
                "required": ["location"],
            },
        },
    ],
    # function_call={"name": "get_current_location"},
)
# %%
import json
from langchain_core.messages import HumanMessage
# model.input_schema.schema()
# model.output_schema.schema()
response = model.invoke("Can I get the latitude and longitude of this location?")
# response = model.invoke([HumanMessage(content="Can I get the latitude and longitude of this location?")])
# model.invoke("what is the weather in Boston?")
# model.invoke("How is the weather here?")
# model.invoke("what time is it now?")
print(response)
response_message = response.additional_kwargs
function_name = response_message["function_call"]["name"]
function_arguments = json.loads(response_message["function_call"]["arguments"])
print(function_name)
print(function_arguments)
print(response_message.get("function_call"))
# if response_message.get("function_call") is not None:
#     function_name = response_message["function_call"]["name"]
#     function_arguments = json.loads(response_message["function_call"]["arguments"])
#     print(function_name)
#     # Call the matching function and collect its result.
#     # function_response = toolkits[function_name](**function_arguments)
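# A minimal dispatch sketch for executing the selected function. The stub
# implementations below are assumptions for illustration; only the function
# names come from the schemas bound above.
from datetime import datetime

def get_current_time():
    return {"time": datetime.now().isoformat()}

def get_current_location():
    # hypothetical fixed coordinates
    return {"latitude": 37.5665, "longitude": 126.9780}

def get_current_weather(location, unit="celsius"):
    # hypothetical fixed reading
    return {"location": location, "temperature": 20, "unit": unit}

toolkits = {
    "get_current_time": get_current_time,
    "get_current_location": get_current_location,
    "get_current_weather": get_current_weather,
}

if response_message.get("function_call") is not None:
    function_response = toolkits[function_name](**function_arguments)
    print(function_response)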
# %%
from langchain_community.chat_models import ChatOllama
from langchain_core.messages import HumanMessage, SystemMessage
chat = ChatOllama(model="gemma:2b")
system_message = SystemMessage(content="You are a friendly assistant.")
human_message = HumanMessage(content="Who are you?")
chat.invoke([system_message, human_message])
# Sampling parameters (temperature, top_p, num_predict) are set on the model itself.
ChatOllama(model="gemma:2b", temperature=0.7, top_p=0.95, num_predict=10).invoke([system_message, human_message])
# %%
import sys
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
llm = Ollama(
    model="gemma:2b",
    temperature=0.7,
)
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a world-class technical documentation writer. Answer in Korean."),
    ("user", "{input}")
])
output_parser = StrOutputParser()
chain = prompt | llm | output_parser
print(chain.invoke({"input": "hello how are you"}))
# Stream the response chunk-by-chunk instead of waiting for the full answer.
for chunk in chain.stream({"input": "hello how are you"}):
    sys.stdout.write(chunk)
    sys.stdout.flush()
    # print(chunk, end="", flush=True)
# %%
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
llm = Ollama(
    model="gemma:2b",
    verbose=True,
    temperature=0,
    # Stream generated tokens to stdout as they arrive.
    callbacks=CallbackManager([StreamingStdOutCallbackHandler()]),
)
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a world-class technical documentation writer. Answer in Korean."),
    ("user", "{input}")
])
output_parser = StrOutputParser()
chain = prompt | llm | output_parser
chain.invoke({"input": "hello how are you"})