# Original code:
# tokenizer, model = get_model(args)
# Modified code: load tokenizer and model from a local checkpoint directory.
import os

model_path = "D:/model/codegeex2-6b"

# The reported TypeError ("stat: path should be ... not NoneType" raised from
# SPTokenizer(vocab_file)) means tokenization_chatglm.py could not find the
# SentencePiece vocabulary file, i.e. `tokenizer.model` is absent from
# model_path — the checkpoint download was incomplete. Fail early with an
# actionable message instead of the opaque traceback.
_vocab_file = os.path.join(model_path, "tokenizer.model")
if not os.path.isfile(_vocab_file):
    raise FileNotFoundError(
        f"'tokenizer.model' not found in {model_path!r}; "
        "re-download the complete codegeex2-6b checkpoint "
        "(all files, including tokenizer.model) into this directory."
    )

# trust_remote_code=True is required: the repo ships its own tokenizer/model
# classes (tokenization_chatglm.py) that transformers must import and run.
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
报错:
fastllm disabled.
[WARN] chatglm-cpp not found. Install it by pip install chatglm-cpp for better performance. Check out https://github.com/li-plus/chatglm.cpp for more details.
Traceback (most recent call last):
File "D:\CodeGeeX2\demo\run_demo_local.py", line 367, in
main()
File "D:\CodeGeeX2\demo\run_demo_local.py", line 236, in main
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
File "D:\CodeGeeX2\venv\lib\site-packages\transformers\models\auto\tokenization_auto.py", line 689, in from_pretrained
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
File "D:\CodeGeeX2\venv\lib\site-packages\transformers\tokenization_utils_base.py", line 1841, in from_pretrained
return cls._from_pretrained(
File "D:\CodeGeeX2\venv\lib\site-packages\transformers\tokenization_utils_base.py", line 2004, in _from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
File "C:\Users\scx56/.cache\huggingface\modules\transformers_modules\codegeex2-6b\tokenization_chatglm.py", line 73, in __init__
self.tokenizer = SPTokenizer(vocab_file)
File "C:\Users\scx56/.cache\huggingface\modules\transformers_modules\codegeex2-6b\tokenization_chatglm.py", line 13, in __init__
assert os.path.isfile(model_path), model_path
File "D:\CodeGeeX2\venv\lib\genericpath.py", line 30, in isfile
st = os.stat(path)
TypeError: stat: path should be string, bytes, os.PathLike or integer, not NoneType
下载模型及代码后按照教程修改 run_demo.py: def main(): parser = argparse.ArgumentParser(); parser = add_code_generation_args(parser); args, _ = parser.parse_known_args()
报错: fastllm disabled. [WARN] chatglm-cpp not found. Install it by
main()
File "D:\CodeGeeX2\demo\run_demo_local.py", line 236, in main
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
File "D:\CodeGeeX2\venv\lib\site-packages\transformers\models\auto\tokenization_auto.py", line 689, in from_pretrained
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
File "D:\CodeGeeX2\venv\lib\site-packages\transformers\tokenization_utils_base.py", line 1841, in from_pretrained
return cls._from_pretrained(
File "D:\CodeGeeX2\venv\lib\site-packages\transformers\tokenization_utils_base.py", line 2004, in _from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
File "C:\Users\scx56/.cache\huggingface\modules\transformers_modules\codegeex2-6b\tokenization_chatglm.py", line 73, in __init__
self.tokenizer = SPTokenizer(vocab_file)
File "C:\Users\scx56/.cache\huggingface\modules\transformers_modules\codegeex2-6b\tokenization_chatglm.py", line 13, in __init__
assert os.path.isfile(model_path), model_path
File "D:\CodeGeeX2\venv\lib\genericpath.py", line 30, in isfile
st = os.stat(path)
TypeError: stat: path should be string, bytes, os.PathLike or integer, not NoneType
pip install chatglm-cpp
for better performance. Check out https://github.com/li-plus/chatglm.cpp for more details. Traceback (most recent call last): File "D:\CodeGeeX2\demo\run_demo_local.py", line 367, in