jingyaogong / minimind

「大模型」3小时完全从0训练26M的小参数GPT,个人显卡即可推理训练!
https://jingyaogong.github.io/minimind
Apache License 2.0
2.7k stars 329 forks source link

5-dpo_train.py 问题 #57

Closed StudyingLover closed 1 month ago

StudyingLover commented 1 month ago

发现 5-dpo_train.py 有两个bug

  1. 没有使用sft的模型,5-dpo_train.py源码是
    model_name_or_path = "minimind"
    tokenizer_name_or_path = "minimind"
    model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, trust_remote_code=True, use_fast=False)
  2. 运行发生报错
    Traceback (most recent call last):
    File "/home/llm/minimind/5-dpo_train.py", line 81, in <module>
    dpo_trainer = DPOTrainer(
                  ^^^^^^^^^^^
    File "/home/llm/minimind/.venv/lib/python3.12/site-packages/huggingface_hub/utils/_deprecation.py", line 101, in inner_f
    return f(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^
    File "/home/llm/minimind/.venv/lib/python3.12/site-packages/trl/trainer/dpo_trainer.py", line 467, in __init__
    if args.model_init_kwargs is None:
       ^^^^^^^^^^^^^^^^^^^^^^
    AttributeError: 'NoneType' object has no attribute 'model_init_kwargs'
StudyingLover commented 1 month ago

解决方案(后面会提 PR):首先运行 `python export_model.py` 导出 HuggingFace 格式的模型,然后将 5-dpo_train.py 的代码修改为:

import os

# Pin training to a single GPU; must be set before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import torch
from transformers import TrainingArguments, AutoModelForCausalLM, AutoTokenizer
from trl import DPOTrainer, DPOConfig
from peft import get_peft_model, LoraConfig, TaskType
from datasets import load_dataset

def find_all_linear_names(model):
    """Return the leaf names of every ``torch.nn.Linear`` in *model*.

    The names are suitable as LoRA ``target_modules``; the output
    projection ``lm_head`` is deliberately excluded so it stays frozen.
    """
    linear_names = {
        name.rsplit(".", 1)[-1]
        for name, module in model.named_modules()
        if isinstance(module, torch.nn.Linear)
    }
    # Never attach adapters to the LM output head.
    linear_names.discard("lm_head")
    return list(linear_names)

def init_model():
    """Load the exported MiniMind checkpoint, wrap it with LoRA adapters.

    Returns:
        (model, tokenizer): the PEFT-wrapped causal-LM moved to ``cuda:0``
        and its tokenizer (pad token aliased to EOS).
    """
    device = "cuda:0"
    # Model must first be exported to HF format via export_model.py.
    base_model = AutoModelForCausalLM.from_pretrained(
        "minimind-v1-small", trust_remote_code=True
    )
    tokenizer = AutoTokenizer.from_pretrained(
        "./model/minimind_tokenizer", trust_remote_code=True, use_fast=False
    )
    # The tokenizer ships without a pad token; reuse EOS for padding.
    tokenizer.pad_token = tokenizer.eos_token

    # Attach LoRA adapters to every Linear layer except lm_head.
    lora_cfg = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        r=8,
        lora_alpha=16,
        lora_dropout=0.1,
        inference_mode=False,
        target_modules=find_all_linear_names(base_model),
    )
    peft_model = get_peft_model(base_model, lora_cfg)
    peft_model.print_trainable_parameters()
    return peft_model.to(device), tokenizer

if __name__ == "__main__":
    model, tokenizer = init_model()

    ################
    # Training config
    ################
    # remove_unused_columns=False keeps the prompt/chosen/rejected columns
    # that DPOTrainer consumes but the model forward() does not declare.
    training_args = DPOConfig(
        output_dir="./minimind_dpo",
        per_device_train_batch_size=1,
        remove_unused_columns=False,
    )

    ################
    # Dataset
    ################
    # Make sure this path is correct and the file exists.
    dataset_path = "./dataset/dpo/train_data.json"
    raw_datasets = load_dataset("json", data_files=dataset_path)

    # ref_model=None: with a PEFT model, trl builds the reference policy
    # by disabling the adapters instead of loading a second copy.
    dpo_trainer = DPOTrainer(
        model,
        ref_model=None,
        args=training_args,
        beta=0.1,
        train_dataset=raw_datasets["train"],
        tokenizer=tokenizer,
        max_length=512,
        max_prompt_length=512,
    )
    dpo_trainer.train()