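# Basic chain: a ChatPromptTemplate piped into a local Ollama model (gemma:2b)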
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
llm = Ollama(model="gemma:2b")
prompt = ChatPromptTemplate.from_messages([
("system", "You are world class technical documentation writer."),
("user", "{input}")
])
chain = prompt | llm
res = chain.invoke({"input": "how can langsmith help with testing?"})
print(res)
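# Same chain with a StrOutputParser appended so the result is returned as a plain string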
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
llm = Ollama(model="gemma:2b")
prompt = ChatPromptTemplate.from_messages([
("system", "You are world class technical documentation writer."),
("user", "{input}")
])
output_parser = StrOutputParser()
chain = prompt | llm | output_parser
res = chain.invoke({"input": "how can langsmith help with testing?"})
print(res)
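# Load the LangSmith user guide page with WebBaseLoader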
from langchain_community.document_loaders import WebBaseLoader
loader = WebBaseLoader("https://docs.smith.langchain.com/user_guide")
docs = loader.load()
print(docs)
※ Required library: pip install faiss-cpu
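# Split the loaded page and index it in a FAISS vector store with OllamaEmbeddings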
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = WebBaseLoader("https://docs.smith.langchain.com/user_guide")
docs = loader.load()
embeddings = OllamaEmbeddings()
text_splitter = RecursiveCharacterTextSplitter()
documents = text_splitter.split_documents(docs)
vector = FAISS.from_documents(documents, embeddings)
※ pip install --upgrade langchain
※ pip install faiss-cpu
※ pip install sentence-transformers
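# Full retrieval chain over the LangSmith user guide: FAISS index + HuggingFace embeddings, with execution timing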
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
import time
start_time = time.time()  # record the start time
prompt = ChatPromptTemplate.from_template("""Answer the following question based only on the provided context:
<context>
{context}
</context>
Question: {input}""")
llm = Ollama(model="gemma:2b")
document_chain = create_stuff_documents_chain(llm, prompt)
loader = WebBaseLoader("https://docs.smith.langchain.com/user_guide")
docs = loader.load()
# embeddings = OllamaEmbeddings()
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
text_splitter = RecursiveCharacterTextSplitter()
documents = text_splitter.split_documents(docs)
vector = FAISS.from_documents(documents, embeddings)
retriever = vector.as_retriever()
retrieval_chain = create_retrieval_chain(retriever, document_chain)
response = retrieval_chain.invoke({"input": "how can langsmith help with testing?"})
print(response["answer"])
end_time = time.time()  # record the end time
print(f"Execution time: {end_time - start_time} seconds")
※ pip install chromadb
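# Retrieval chain over a local Korean Constitution text file: Chroma vector store + OllamaEmbeddings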
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
import time
start_time = time.time()  # record the start time
prompt = ChatPromptTemplate.from_template("""Never give arbitrary responses. Answer the following question based only on the provided context:
<context>
{context}
</context>
Question: {input}""")
llm = Ollama(model="gemma:2b")
document_chain = create_stuff_documents_chain(llm, prompt)
loader = TextLoader('korea_constitution.txt.bak', encoding='UTF-8')  # local Korean Constitution text file
docs = loader.load()
#print(docs)
embeddings = OllamaEmbeddings()
#embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(docs)
vector = Chroma.from_documents(documents, embeddings)
print(len(documents))
retriever = vector.as_retriever(search_kwargs={"k": 22})
retrieval_chain = create_retrieval_chain(retriever, document_chain)
response = retrieval_chain.invoke({"input": "대통령 임기는?"})  # "What is the President's term of office?"
print(response["answer"])
end_time = time.time()  # record the end time
print(f"Execution time: {end_time - start_time} seconds")
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
import time
start_time = time.time()  # record the start time
prompt = ChatPromptTemplate.from_template("""Answer the following question based only on the provided context:
<context>
{context}
</context>
Question: {input}""")
llm = Ollama(model="gemma:2b")
document_chain = create_stuff_documents_chain(llm, prompt)
loader = WebBaseLoader("https://docs.smith.langchain.com/user_guide")
loader = WebBaseLoader("https://raw.githubusercontent.com/puzzlet/constitution-kr/master/%EB%8C%80%ED%95%9C%EB%AF%BC%EA%B5%AD%20%ED%97%8C%EB%B2%95.txt")
docs = loader.load()
embeddings = OllamaEmbeddings()
# embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
text_splitter = RecursiveCharacterTextSplitter()
documents = text_splitter.split_documents(docs)
vector = FAISS.from_documents(documents, embeddings)
retriever = vector.as_retriever()
retrieval_chain = create_retrieval_chain(retriever, document_chain)
# response = retrieval_chain.invoke({"input": "how can langsmith help with testing?"})
response = retrieval_chain.invoke({"input": "대통령 임기는?"})  # "What is the President's term of office?"
print(response["answer"])
end_time = time.time()  # record the end time
print(f"Execution time: {end_time - start_time} seconds")
1. Reference sites
LangChain: https://python.langchain.com/docs/get_started/introduction/
LangChain quickstart: https://python.langchain.com/docs/get_started/quickstart/
Ollama download: https://ollama.com/download
Ollama GitHub: https://github.com/ollama/ollama/tree/main
Translation: https://www.deepl.com/translator
LLM with strong Korean support: https://huggingface.co/heegyu/EEVE-Korean-Instruct-10.8B-v1.0-GGUF
2. Directory structure after installing Ollama
3. Download a local LLM (see the commands sketched after this list)
4. Launch Visual Studio Code and activate the virtual environment
5. Create the file in .ipynb format so each step can be run separately
6. Install the required libraries (see the commands sketched after this list)
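For steps 3 and 6, the commands below are a minimal sketch based on the model and libraries referenced above (gemma:2b, langchain, faiss-cpu, chromadb, sentence-transformers); the exact package set and the beautifulsoup4 dependency for WebBaseLoader may vary with your environment.
※ ollama pull gemma:2b
※ pip install --upgrade langchain langchain-community langchain-core langchain-text-splitters
※ pip install faiss-cpu chromadb sentence-transformers beautifulsoup4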