RuntimeError: Internal: could not parse ModelProto from /mnt/data/legalexp/LLM_exp/MiniCPM/minicpm_finetune_baseline/MiniCPM-2B-sft-bf16/tokenizer.model #3849
Traceback (most recent call last):
File "/mnt/data/legalexp/LLM_exp/LLaMA-Factory/examples/lora_single_gpu/../../src/train_bash.py", line 14, in <module>
main()
File "/mnt/data/legalexp/LLM_exp/LLaMA-Factory/examples/lora_single_gpu/../../src/train_bash.py", line 5, in main
run_exp()
File "/mnt/data/legalexp/LLM_exp/LLaMA-Factory/src/llmtuner/train/tuner.py", line 39, in run_exp
run_dpo(model_args, data_args, training_args, finetuning_args, callbacks)
File "/mnt/data/legalexp/LLM_exp/LLaMA-Factory/src/llmtuner/train/dpo/workflow.py", line 27, in run_dpo
tokenizer = load_tokenizer(model_args)
File "/mnt/data/legalexp/LLM_exp/LLaMA-Factory/src/llmtuner/model/loader.py", line 41, in load_tokenizer
tokenizer = AutoTokenizer.from_pretrained(
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py", line 825, in from_pretrained
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2048, in from_pretrained
return cls._from_pretrained(
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2287, in _from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama.py", line 182, in __init__
self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama.py", line 209, in get_spm_processor
tokenizer.Load(self.vocab_file)
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/sentencepiece/__init__.py", line 961, in Load
return self.LoadFromFile(model_file)
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/sentencepiece/__init__.py", line 316, in LoadFromFile
return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)
RuntimeError: Internal: could not parse ModelProto from /mnt/data/legalexp/LLM_exp/MiniCPM/minicpm_finetune_baseline/MiniCPM-2B-sft-bf16/tokenizer.model
Reminder
Reproduction
#!/bin/bash
CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ --stage dpo \ --do_train \ --model_name_or_path /mnt/data/legalexp/LLM_exp/MiniCPM/minicpm_finetune_baseline/MiniCPM-2B-sft-bf16 \ --adapter_name_or_path /mnt/data/legalexp/LLM_exp/MiniCPM/minicpm_finetune_baseline_v2/output/CJOLoRA/checkpoint-5000 \ --create_new_adapter \ --dataset 1.generation_train.json,1.generation_eval.json \ --dataset_dir ./mnt/data/legalexp/LLM_exp/MiniCPM/minicpm_finetune_baseline_v2/data/CJOChatML_DPO/ \ --template cpm \ --finetuning_type lora \ --lora_target q_proj,v_proj \ --output_dir ../../saves/LLaMA2-7B/lora/dpo \ --overwrite_cache \ --overwrite_output_dir \ --cutoff_len 2048 \ --preprocessing_num_workers 16 \ --per_device_train_batch_size 1 \ --per_device_eval_batch_size 1 \ --gradient_accumulation_steps 8 \ --lr_scheduler_type cosine \ --logging_steps 10 \ --warmup_steps 20 \ --save_steps 100 \ --eval_steps 100 \ --evaluation_strategy steps \ --load_best_model_at_end \ --learning_rate 1e-5 \ --num_train_epochs 1.0 \ --max_samples 1000 \ --val_size 0.1 \ --dpo_ftx 1.0 \ --plot_loss \ --fp16
Expected behavior
Traceback (most recent call last): File "/mnt/data/legalexp/LLM_exp/LLaMA-Factory/examples/lora_single_gpu/../../src/train_bash.py", line 14, in <module>
main()
File "/mnt/data/legalexp/LLM_exp/LLaMA-Factory/examples/lora_single_gpu/../../src/train_bash.py", line 5, in main
run_exp()
File "/mnt/data/legalexp/LLM_exp/LLaMA-Factory/src/llmtuner/train/tuner.py", line 39, in run_exp
run_dpo(model_args, data_args, training_args, finetuning_args, callbacks)
File "/mnt/data/legalexp/LLM_exp/LLaMA-Factory/src/llmtuner/train/dpo/workflow.py", line 27, in run_dpo
tokenizer = load_tokenizer(model_args)
File "/mnt/data/legalexp/LLM_exp/LLaMA-Factory/src/llmtuner/model/loader.py", line 41, in load_tokenizer
tokenizer = AutoTokenizer.from_pretrained(
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py", line 825, in from_pretrained
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2048, in from_pretrained
return cls._from_pretrained(
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2287, in _from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama.py", line 182, in __init__
self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama.py", line 209, in get_spm_processor
tokenizer.Load(self.vocab_file)
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/sentencepiece/__init__.py", line 961, in Load
return self.LoadFromFile(model_file)
File "/home/hcq/miniconda3/envs/llama_factory/lib/python3.10/site-packages/sentencepiece/__init__.py", line 316, in LoadFromFile
return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)
RuntimeError: Internal: could not parse ModelProto from /mnt/data/legalexp/LLM_exp/MiniCPM/minicpm_finetune_baseline/MiniCPM-2B-sft-bf16/tokenizer.model
System Info
transformers
version: 4.38.1
Others
No response