Here is the error I get when loading the model:
root@r98c42b95ad047a39682a5cc9c547b94-task0-0:/tmp/code/langchain-chatglm-webui# python app.py
开始加载模型配置
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py:1383: UserWarning: positional arguments and argument "destination" are deprecated. nn.Module.state_dict will not accept them in the future. Refer to https://pytorch.org/docs/master/generated/torch.nn.Module.html#torch.nn.Module.state_dict for details.
warnings.warn(
'HTTPSConnectionPool(host='huggingface.co', port=443): Max retries exceeded with url: /THUDM/chatglm2-6b/resolve/main/tokenizer_config.json (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7fe52298f940>, 'Connection to huggingface.co timed out. (connect timeout=10)'))' thrown while requesting HEAD https://huggingface.co/THUDM/chatglm2-6b/resolve/main/tokenizer_config.json
'HTTPSConnectionPool(host='huggingface.co', port=443): Max retries exceeded with url: /THUDM/chatglm2-6b/resolve/main/config.json (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7fe52298fe50>, 'Connection to huggingface.co timed out. (connect timeout=10)'))' thrown while requesting HEAD https://huggingface.co/THUDM/chatglm2-6b/resolve/main/config.json
加载模型出错: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like THUDM/chatglm2-6b is not the path to a directory containing a file named config.json.
Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.
Running on local URL: http://0.0.0.0:7860
Could not create share link. Please check your internet connection or our status page: https://status.gradio.app
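The decisive line is the "加载模型出错" error: Transformers cannot reach huggingface.co, finds no cached copy, and "THUDM/chatglm2-6b" is not a local directory. As the message itself suggests, one workaround is offline mode with a locally downloaded copy of the model. A minimal sketch, assuming the weights have already been fetched (e.g. with git clone plus git-lfs on a machine that does have access) into a hypothetical directory /tmp/models/chatglm2-6b:

import os

# Keep Transformers and huggingface_hub fully offline; these must be set
# before transformers is imported.
os.environ["TRANSFORMERS_OFFLINE"] = "1"
os.environ["HF_HUB_OFFLINE"] = "1"

from transformers import AutoModel, AutoTokenizer

# Hypothetical local path; the directory must already contain config.json,
# the tokenizer files and the model weights.
local_model_dir = "/tmp/models/chatglm2-6b"
tokenizer = AutoTokenizer.from_pretrained(local_model_dir, trust_remote_code=True)
model = AutoModel.from_pretrained(local_model_dir, trust_remote_code=True)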
Below is my current app.py code.
Hoping an expert can help. Thanks!
import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
import torch
# Prefer the nltk_data directory bundled next to this file over any
# system-wide NLTK data.
nltk.data.path = [os.path.join(os.path.dirname(__file__), "nltk_data")] + nltk.data.path
# These names all come from config.py via `from config import *`;
# the self-assignments are effectively no-ops that make the dependence explicit.
embedding_model_dict = embedding_model_dict
llm_model_dict = llm_model_dict
EMBEDDING_DEVICE = EMBEDDING_DEVICE
LLM_DEVICE = LLM_DEVICE
num_gpus = num_gpus
init_llm = init_llm
init_embedding_model = init_embedding_model

# Collect every model name from the nested llm_model_dict.
llm_model_list = []
for i in llm_model_dict:
    for j in llm_model_dict[i]:
        llm_model_list.append(j)
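The traceback shows the loader being handed the hub id THUDM/chatglm2-6b, so the entries of llm_model_dict in config.py (not shown here) presumably map model names to Hugging Face repo ids. If that is the case, pointing the entry at a local directory instead would skip the network lookup entirely; a hypothetical entry:

# config.py (hypothetical): replace the hub id with a local directory
# that already holds the downloaded ChatGLM2-6B files.
llm_model_dict = {
    "chatglm": {
        "ChatGLM2-6B": "/tmp/models/chatglm2-6b",  # was "THUDM/chatglm2-6b"
    },
}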
def search_web(query):
    # The body did not survive the paste; this is a minimal sketch using
    # the duckduckgo_search helpers imported above (an assumption about
    # what the original did).
    results = ddg(query)
    web_content = ''
    if results:
        for result in results:
            web_content += result['body']
    return web_content
class KnowledgeBasedChatLLM:
    # The class body (init_model_config and the retrieval-QA logic used
    # below) was lost in the paste; only the stub is reproduced here.
    ...
def update_status(history, status):
    history = history + [[None, status]]
    print(status)
    return history
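A quick check of the shape update_status produces (hypothetical values):

history = update_status([], "loading model")  # -> [[None, "loading model"]]

which Gradio's Chatbot component renders as a bot message with no user text.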
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
def init_model():
    try:
        print("开始加载模型配置")
        knowladge_based_chat_llm.init_model_config()
        print("模型配置加载成功")
        knowladge_based_chat_llm.llm._call("你好")
        return """初始模型已成功加载,可以开始对话"""
    except Exception as e:
        print(f"加载模型出错: {e}")  # 打印详细的异常信息
        return """模型未成功加载,请重新选择模型后点击"重新加载模型"按钮"""
def clear_session():
    return '', None
def reinit_model(large_language_model, embedding_model, history):
    try:
        knowladge_based_chat_llm.init_model_config(
            large_language_model=large_language_model,
            embedding_model=embedding_model)
        model_status = """模型已成功重新加载,可以开始对话"""
    except Exception as e:
        # The handler body and the function's return were cut off in the
        # paste.
        ...
def init_vector_store(file_obj):
    # Body lost in the paste.
    ...
def predict(input, use_web, top_k, history_len, temperature, top_p,
            history=None):
    if history is None:
        history = []
    # The rest of the function was cut off in the paste.
    ...
model_status = init_model()
if __name__ == "__main__":
    block = gr.Blocks()
    with block as demo:
        # The Gradio UI definition was cut off in the paste.
        ...