THUDM / GLM-4

GLM-4 series: Open Multilingual Multimodal Chat LMs
Apache License 2.0

Startup error when loading checkpoint-2500 produced by single-node dual-GPU LoRA fine-tuning; help wanted #249

Closed zycovoo closed 3 months ago

zycovoo commented 3 months ago

This is the code I modified in openai_api_server.py:

MODEL_PATH = os.environ.get('MODEL_PATH', '/jcdata/llm/chatglm4/finetune_demo/output/checkpoint-2500')
MAX_MODEL_LENGTH = 16384

ModelType = Union[PreTrainedModel, PeftModelForCausalLM]
TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]

def load_model_and_tokenizer(
        model_dir: Union[str, Path], trust_remote_code: bool = True
) -> tuple[ModelType, TokenizerType]:
    model_dir = Path(model_dir).expanduser().resolve()
    if (model_dir / 'adapter_config.json').exists():
        # LoRA checkpoint: load the base model with the adapter applied
        model = AutoPeftModelForCausalLM.from_pretrained(
            model_dir, trust_remote_code=trust_remote_code, device_map='auto'
        )
        tokenizer_dir = model.peft_config['default'].base_model_name_or_path
    else:
        model = AutoModelForCausalLM.from_pretrained(
            model_dir, trust_remote_code=trust_remote_code, device_map='auto'
        )
        tokenizer_dir = model_dir
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_dir, trust_remote_code=trust_remote_code
    )
    return model, tokenizer

if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True, device_map="auto")
    tokenizer = load_model_and_tokenizer(MODEL_PATH, trust_remote_code=True)
    engine_args = AsyncEngineArgs(
        model=MODEL_PATH,
        tokenizer=MODEL_PATH,
        # model=load_model_and_tokenizer(MODEL_PATH, trust_remote_code=True),
        # tokenizer=load_model_and_tokenizer(MODEL_PATH, trust_remote_code=True),
        # If you have multiple GPUs, set this to your GPU count
        tensor_parallel_size=2,
        dtype="bfloat16",
        trust_remote_code=True,
        # Fraction of GPU memory to use; set a value appropriate to your GPU's memory.
        # For example, if your GPU has 80G and you only want to use 24G, set 24/80 = 0.3.
        gpu_memory_utilization=0.9,
        enforce_eager=True,
        worker_use_ray=False,
        engine_use_ray=False,
        disable_log_requests=True,
        max_model_len=MAX_MODEL_LENGTH,
    )
    engine = AsyncLLMEngine.from_engine_args(engine_args)
    uvicorn.run(app, host='0.0.0.0', port=8200, workers=1)

This is the error message:

Traceback (most recent call last):
  File "/root/miniconda3/envs/glm4/lib/python3.10/site-packages/transformers/utils/hub.py", line 398, in cached_file
    resolved_file = hf_hub_download(
  File "/root/miniconda3/envs/glm4/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
    validate_repo_id(arg_value)
  File "/root/miniconda3/envs/glm4/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
    raise HFValidationError(
huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '(PeftModelForCausalLM( (base_model): LoraModel( (model): ChatGLMForConditionalGeneration( (transformer): ChatGLMModel( (embedding): Embedding( (word_embeddings): Embedding(151552, 4096) ) (rotary_pos_emb): RotaryEmbedding() (encoder): GLMTransformer( (layers): ModuleList( (0-39): 40 x GLMBlock( (input_layernorm): RMSNorm() (self_attention): SelfAttention( (query_key_value): lora.Linear( (base_layer): Linear(in_features=4096, out_features=4608, bias=True) (lora_dropout): ModuleDict( (default): Dropout(p=0.1, inplace=False) ) (lora_A): ModuleDict( (default): Linear(in_features=4096, out_features=8, bias=False) ) (lora_B): ModuleDict( (default): Linear(in_features=8, out_features=4608, bias=False) ) (lora_embedding_A): ParameterDict() (lora_embedding_B): ParameterDict() ) (core_attention): SdpaAttention( (attention_dropout): Dropout(p=0.0, inplace=False) ) (dense): Linear(in_features=4096, out_features=4096, bias=False) ) (post_attention_layernorm): RMSNorm() (mlp): MLP( (dense_h_to_4h): Linear(in_features=4096, out_features=27392, bias=False) (dense_4h_to_h): Linear(in_features=13696, out_features=4096, bias=False) ) ) ) (final_layernorm): RMSNorm() ) (output_layer): Linear(in_features=4096, out_features=151552, bias=False) ) ) ) ), ChatGLM4Tokenizer(name_or_path='/jcdata/llm/chatglm4/glm-4-9b-chat/', vocab_size=151329, model_max_length=128000, is_fast=False, padding_side='left', truncation_side='right', special_tokens={'eos_token': '<|endoftext|>', 'pad_token': '<|endoftext|>', 'additional_special_tokens': ['<|endoftext|>', '[MASK]', '[gMASK]', '[sMASK]', '', '', '<|system|>', '<|user|>', '<|assistant|>', '<|observation|>', '<|begin_of_image|>', '<|end_of_image|>', '<|begin_of_video|>', '<|end_of_video|>']}, clean_up_tokenization_spaces=False), added_tokens_decoder={ 151329: AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), 151330: AddedToken("[MASK]", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), 151331: AddedToken("[gMASK]", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), 151332: AddedToken("[sMASK]", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), 151333: AddedToken("", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), 151334: AddedToken("", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), 151335: AddedToken("<|system|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), 151336: AddedToken("<|user|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), 151337: AddedToken("<|assistant|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), 151338: AddedToken("<|observation|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), 151339: AddedToken("<|begin_of_image|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), 151340: AddedToken("<|end_of_image|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), 151341: AddedToken("<|begin_of_video|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), 151342: AddedToken("<|end_of_video|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), })'. Use repo_type argument if needed.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/jcdata/llm/chatglm4/basic_demo/openai_api_server_weitiao.py", line 670, in <module>
    engine = AsyncLLMEngine.from_engine_args(engine_args)
  File "/root/miniconda3/envs/glm4/lib/python3.10/site-packages/vllm/engine/async_llm_engine.py", line 362, in from_engine_args
    engine_config = engine_args.create_engine_config()
  File "/root/miniconda3/envs/glm4/lib/python3.10/site-packages/vllm/engine/arg_utils.py", line 559, in create_engine_config
    model_config = ModelConfig(
  File "/root/miniconda3/envs/glm4/lib/python3.10/site-packages/vllm/config.py", line 129, in __init__
    self.hf_config = get_config(self.model, trust_remote_code, revision,
  File "/root/miniconda3/envs/glm4/lib/python3.10/site-packages/vllm/transformers_utils/config.py", line 27, in get_config
    config = AutoConfig.from_pretrained(
  File "/root/miniconda3/envs/glm4/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained
    config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
  File "/root/miniconda3/envs/glm4/lib/python3.10/site-packages/transformers/configuration_utils.py", line 631, in get_config_dict
    config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
  File "/root/miniconda3/envs/glm4/lib/python3.10/site-packages/transformers/configuration_utils.py", line 686, in _get_config_dict
    resolved_config_file = cached_file(
  File "/root/miniconda3/envs/glm4/lib/python3.10/site-packages/transformers/utils/hub.py", line 462, in cached_file
    raise EnvironmentError(
OSError: Incorrect path_or_model_id: '(PeftModelForCausalLM(...), ChatGLM4Tokenizer(...))' [the same (model, tokenizer) tuple repr as in the HFValidationError above]. Please provide either the path to a local folder or the repo_id of a model on the Hub.

zRzRzRzRzRzRzR commented 3 months ago

A fine-tuned model cannot be used directly. For vLLM, please check the official vLLM code for how to merge the LoRA files.
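For context on the traceback: vLLM's AsyncEngineArgs expects model and tokenizer to be a local path or a Hub repo id, so the commented-out lines that pass the loaded (model, tokenizer) tuple turn its repr into the "repo id" seen in the HFValidationError. Below is a minimal sketch of one common way to merge a LoRA adapter into the base model with peft before serving; the adapter path is taken from this issue, while MERGED_DIR is a made-up output directory you would choose yourself.

from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# ADAPTER_DIR is the checkpoint from this issue; MERGED_DIR is hypothetical.
ADAPTER_DIR = '/jcdata/llm/chatglm4/finetune_demo/output/checkpoint-2500'
MERGED_DIR = '/jcdata/llm/chatglm4/finetune_demo/output/checkpoint-2500-merged'

# Load the base model with the LoRA adapter applied, then fold the adapter
# deltas into the base weights so no peft wrapper remains.
model = AutoPeftModelForCausalLM.from_pretrained(
    ADAPTER_DIR, trust_remote_code=True, device_map='auto'
)
merged = model.merge_and_unload()
merged.save_pretrained(MERGED_DIR)

# The tokenizer ships with the base model, not with the adapter checkpoint.
base_dir = model.peft_config['default'].base_model_name_or_path
tokenizer = AutoTokenizer.from_pretrained(base_dir, trust_remote_code=True)
tokenizer.save_pretrained(MERGED_DIR)

With the merged directory in place, setting model=MERGED_DIR and tokenizer=MERGED_DIR in AsyncEngineArgs should let vLLM load it like any ordinary model directory, and the load_model_and_tokenizer helper is no longer needed on the vLLM path.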