hiyouga / LLaMA-Factory

Unified Efficient Fine-Tuning of 100+ LLMs (ACL 2024)
https://arxiv.org/abs/2403.13372
Apache License 2.0

Continuous pretraining CodeLlama34B get loss 0.0 error #1883

Closed chuan298 closed 9 months ago

chuan298 commented 10 months ago

Reminder

Reproduction

My command:

NCCL_DEBUG=INFO CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 deepspeed --num_gpus 8 --num_nodes 1 --master_port=9902 src/train_bash.py \
    --deepspeed zero3.json \
    --stage pt \
    --model_name_or_path /raid/CodeLlama-34_EXTEND_VOCAB_V8_UPDATE/ \
    --do_train \
    --dataset mtet,translate_mix \
    --finetuning_type full \
    --output_dir /raid/output_MODEL_CODELLAMA34B_EXT \
    --cache_path /raid/cache_path_2048/ \
    --per_device_train_batch_size 1 \
    --gradient_accumulation_steps 16 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 1000 \
    --learning_rate 5e-4 \
    --num_train_epochs 1.0 \
    --plot_loss \
    --bf16 \
    --dataset_dir /raid/DATA_PRETRAIN_PHRASE1/ \
    --flash_attn \
    --cutoff_len 2048 \
    --overwrite_output_dir \
    --use_fast_tokenizer false \
    --preprocessing_num_workers 160

My zero3 config:

{
    "fp16": {
        "enabled": "auto",
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },
    "bf16": {
        "enabled": "auto"
    },
    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": "auto",
            "betas": "auto",
            "eps": "auto",
            "weight_decay": "auto"
        }
    },
    "scheduler": {
        "type": "WarmupLR",
        "params": {
            "warmup_min_lr": "auto",
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto"
        }
    },
    "zero_optimization": {
        "stage": 3,
        "offload_optimizer": {
            "device": "cpu",
            "pin_memory": true,
            "ratio": 0.1
        },
        "offload_param": {
            "device": "none",
            "pin_memory": true
        },
        "overlap_comm": true,
        "contiguous_gradients": true,
        "sub_group_size": 1e9,
        "reduce_bucket_size": "auto",
        "stage3_prefetch_bucket_size": "auto",
        "stage3_param_persistence_threshold": "auto",
        "stage3_max_live_parameters": 1e9,
        "stage3_max_reuse_distance": 1e9,
        "stage3_gather_16bit_weights_on_model_save": true
    },
    "gradient_accumulation_steps": "auto",
    "gradient_clipping": "auto",
    "steps_per_print": 100,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "wall_clock_breakdown": false
}
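
For reference, Hugging Face's DeepSpeed integration fills the "auto" fields above from the Trainer arguments at launch time. A small sketch (not part of the original report) of how the batch-size fields resolve for the command above:

# Sketch only: how the "auto" batch-size fields in zero3.json resolve for the
# launch command above (values taken from that command, not measured).
num_gpus = 8                 # deepspeed --num_gpus 8
micro_batch_per_gpu = 1      # --per_device_train_batch_size 1  -> train_micro_batch_size_per_gpu
grad_accum_steps = 16        # --gradient_accumulation_steps 16 -> gradient_accumulation_steps

train_batch_size = num_gpus * micro_batch_per_gpu * grad_accum_steps
print(train_batch_size)      # 128 sequences of up to 2048 tokens per optimizer step
# Likewise, "bf16.enabled" is filled from --bf16 and the optimizer "lr" from --learning_rate 5e-4.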

Error: the training loss is logged as 0.0 from the very first logging steps.

  0%|▊ | 55/13943 [45:06<189:33:19, 49.14s/it]
{'loss': 0.0, 'learning_rate': 0.0004999993654043075, 'epoch': 0.0}
{'loss': 0.0, 'learning_rate': 0.0004999974616204515, 'epoch': 0.0}
{'loss': 0.0, 'learning_rate': 0.000499994288658097, 'epoch': 0.0}
{'loss': 0.0, 'learning_rate': 0.0004999898465333526, 'epoch': 0.0}
{'loss': 0.0, 'learning_rate': 0.0004999841352687698, 'epoch': 0.0}

Expected behavior

No response

System Info

GPU: 8*A100 80GB

deepspeed==0.12.5, transformers==4.37.0.dev0, flash-attn==2.3.3, torch==2.0.1+cu118, bitsandbytes==0.41.3, accelerate==0.24.1

Others

I extended the model's vocabulary from 32000 to 46069 tokens so that I can continue pretraining on my domain data.
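
In case it is useful context for the zero loss, here is a minimal sketch (an assumption, not code from this issue; the paths are placeholders) of the usual resize step after extending the tokenizer, using the standard transformers API:

# Hypothetical sketch: align the model's embedding matrix and lm_head with the
# extended tokenizer (32000 -> 46069 tokens) before continuous pretraining.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("/path/to/extended_tokenizer", use_fast=False)  # placeholder path
model = AutoModelForCausalLM.from_pretrained("codellama/CodeLlama-34b-hf")

# Appends randomly initialized rows for the new tokens; these rows are what the
# continuous-pretraining run has to learn.
model.resize_token_embeddings(len(tokenizer))
print(model.get_input_embeddings().weight.shape)  # (46069, hidden_size)

model.save_pretrained("/path/to/CodeLlama-34B-extended")    # placeholder path
tokenizer.save_pretrained("/path/to/CodeLlama-34B-extended")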

chuan298 commented 10 months ago

When I run the code below instead, I get a normal loss, using the same dataset as in the command above.

# This code is based on FastChat's revised version of tatsu-lab/stanford_alpaca.

from dataclasses import dataclass, field
import json
import math
import logging
import os
from typing import Dict, Optional, List
import torch
from torch.utils.data import Dataset
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
import transformers
from transformers import DataCollatorForLanguageModeling, Trainer, GPTQConfig, set_seed
from transformers.integrations import is_deepspeed_zero3_enabled
import deepspeed
from transformers.trainer_pt_utils import LabelSmoother
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
import torch.distributed as dist

IGNORE_TOKEN_ID = LabelSmoother.ignore_index
#from flash_attn_patch import replace_llama_attn_with_flash_attn
#replace_llama_attn_with_flash_attn()

@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(default="codellama/CodeLlama-34b-hf")
    tokenizer_name_or_path: str = ""

@dataclass
class DataArguments:
    data_path: str = field(
        default=None, metadata={"help": "Path to the training data."}
    )
    eval_data_path: str = field(
        default=None, metadata={"help": "Path to the evaluation data."}
    )
    lazy_preprocess: bool = False
    max_train_samples: Optional[int] = None

@dataclass
class TrainingArguments(transformers.TrainingArguments):
    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_bnb_8bit")
    model_max_length: int = field(
        default=8192,
        metadata={
            "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
    use_lora: bool = False
    resume_from_checkpoint: bool = True

@dataclass
class LoraArguments:
    lora_r: int = 64
    lora_alpha: int = 16
    lora_dropout: float = 0.05
    lora_target_modules: List[str] = field(
        default_factory=lambda: ["q_proj", "k_proj", "v_proj", "o_proj"]  # LLaMA-style attention projections
    )
    lora_weight_path: str = ""
    lora_bias: str = "none"
    q_lora: bool = False

def maybe_zero_3(param):
    if hasattr(param, "ds_id"):
        assert param.ds_status == ZeroParamStatus.NOT_AVAILABLE
        with zero.GatheredParameters([param]):
            param = param.data.detach().cpu().clone()
    else:
        param = param.detach().cpu().clone()
    return param

# Borrowed from peft.utils.get_peft_model_state_dict
def get_peft_state_maybe_zero_3(named_params, bias):
    if bias == "none":
        to_return = {k: t for k, t in named_params if "lora_" in k}
    elif bias == "all":
        to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
    elif bias == "lora_only":
        to_return = {}
        maybe_lora_bias = {}
        lora_bias_names = set()
        for k, t in named_params:
            if "lora_" in k:
                to_return[k] = t
                bias_name = k.split("lora_")[0] + "bias"
                lora_bias_names.add(bias_name)
            elif "bias" in k:
                maybe_lora_bias[k] = t
        for k, t in maybe_lora_bias.items():
            if k in lora_bias_names:
                to_return[k] = t
    else:
        raise NotImplementedError
    to_return = {k: maybe_zero_3(v) for k, v in to_return.items()}
    return to_return

local_rank = None

def rank0_print(*args):
    if local_rank == 0:
        print(*args)

def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str, bias="none"):
    """Collects the state dict and dump to disk."""
    # check if zero3 mode enabled
    if is_deepspeed_zero3_enabled():
        state_dict = trainer.model_wrapped._zero3_consolidated_16bit_state_dict()
    else:
        if trainer.args.use_lora:
            state_dict = get_peft_state_maybe_zero_3(
                trainer.model.named_parameters(), bias
            )
        else:
            state_dict = trainer.model.state_dict()
    if trainer.args.should_save and trainer.args.local_rank == 0:
        trainer._save(output_dir, state_dict=state_dict)

import datasets
def make_unsupervised_data_module(
    tokenizer: transformers.PreTrainedTokenizer, data_args, max_len,
) -> Dict:
    """Make dataset and collator for pretraining."""

    rank0_print("Loading data...")

    train_dataset = datasets.load_from_disk(data_args.data_path)
    #print(train_dataset[0])
    if data_args.eval_data_path:
        eval_dataset = datasets.load_from_disk(data_args.eval_data_path)
    else:
        eval_dataset = None

    return dict(train_dataset=train_dataset, eval_dataset=eval_dataset)

from transformers.trainer_utils import get_last_checkpoint    
logger = logging.getLogger(__name__)

def train():
    global local_rank  # without this, rank0_print() would never see the rank assigned below

    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments, LoraArguments)
    )
    (
        model_args,
        data_args,
        training_args,
        lora_args,
    ) = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    compute_dtype = (
        torch.float16
        if training_args.fp16
        else (torch.bfloat16 if training_args.bf16 else torch.float32)
    )

    local_rank = training_args.local_rank

    device_map = None
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    ddp = world_size != 1
    if lora_args.q_lora:
        device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)} if ddp else None
        if len(training_args.fsdp) > 0 or is_deepspeed_zero3_enabled():
            logging.warning(
                "FSDP or ZeRO3 are not incompatible with QLoRA."
            )

    # Set RoPE scaling factor
    config = transformers.AutoConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        #trust_remote_code=True,
    )
    config.use_cache = False

    # Load model and tokenizer
    model = transformers.AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path,
        config=config,
        cache_dir=training_args.cache_dir,
        device_map=device_map,
        torch_dtype=compute_dtype,
        use_flash_attention_2=True,
        #trust_remote_code=True,
        quantization_config=GPTQConfig(
            bits=4, disable_exllama=True
        )
        if training_args.use_lora and lora_args.q_lora
        else None,
    )
    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_args.tokenizer_name_or_path,
        cache_dir=training_args.cache_dir,
        model_max_length=training_args.model_max_length,
        padding_side="right",
        use_fast=False,
        #legacy=False
        #trust_remote_code=True,
    )
    tokenizer.pad_token_id = tokenizer.eos_token_id
    tokenizer.pad_token = tokenizer.eos_token
    if training_args.use_lora:
        lora_config = LoraConfig(
            r=lora_args.lora_r,
            lora_alpha=lora_args.lora_alpha,
            target_modules=lora_args.lora_target_modules,
            lora_dropout=lora_args.lora_dropout,
            bias=lora_args.lora_bias,
            task_type="CAUSAL_LM",
            modules_to_save=["embed_tokens", "lm_head"]  # also train the embedding and output layers, needed when new tokens are added (LLaMA uses "embed_tokens", not "wte")
        )
        if lora_args.q_lora:
            model = prepare_model_for_kbit_training(
                model, use_gradient_checkpointing=training_args.gradient_checkpointing
            )

        model = get_peft_model(model, lora_config)

        if training_args.gradient_checkpointing:
            model.enable_input_require_grads()
    print(model)
    #for name, param in model.named_parameters():
    #    #print(name)
    #    if "embed_tokens" not in name:
    #        param.requires_grad = False 
    #model.enable_input_require_grads()
    #logger.info(f"trainable params: {model.num_parameters(only_trainable=True)/model.num_parameters()}")
    # Load data
    data_module = make_unsupervised_data_module(
        tokenizer=tokenizer, data_args=data_args, max_len=training_args.model_max_length
    )
    collate_fn = DataCollatorForLanguageModeling(tokenizer, mlm=False)

    # Start the trainer
    trainer = Trainer(
        model=model, tokenizer=tokenizer, args=training_args, data_collator=collate_fn, **data_module
    )

    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)

    metrics = train_result.metrics

    # max_train_samples = (
    #     data_args.max_train_samples if data_args.max_train_samples is not None else len(data_module["train_dataset"])
    # )
    # metrics["train_samples"] = min(max_train_samples, len(data_module["train_dataset"]))

    trainer.log_metrics("train", metrics)
    trainer.save_metrics("train", metrics)
    trainer.save_state() 
    trainer.save_model()

if __name__ == "__main__":
    train()
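
One detail of the script above worth noting: DataCollatorForLanguageModeling(tokenizer, mlm=False) builds labels by copying input_ids and setting pad-token positions to -100, and because the script sets pad_token = eos_token, EOS positions are ignored by the loss as well. A small stand-alone demo (the GPT-2 tokenizer is used only as a stand-in, an assumption not taken from this issue):

# Demo of the collator used above: labels mirror input_ids, except positions
# equal to pad_token_id (== eos_token_id here) are replaced with -100.
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tok = AutoTokenizer.from_pretrained("gpt2")   # stand-in tokenizer for illustration
tok.pad_token = tok.eos_token                 # same choice as in the script above

collator = DataCollatorForLanguageModeling(tok, mlm=False)
batch = collator([tok("def add(a, b):"), tok("return a + b  # a longer second sample")])
print(batch["input_ids"][0])
print(batch["labels"][0])  # identical ids, but padded (EOS-id) positions are -100
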
hiyouga commented 10 months ago

How about removing the --flash_attn argument?
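
If it helps to check this outside the Trainer, a quick sketch (not from this thread; it assumes the model path from the command above and bf16-capable GPUs via device_map="auto") that computes the loss on one sample with the default, non-flash attention implementation:

# Hypothetical sanity check: the loss on a single sample should be finite and
# clearly nonzero when flash-attn is not involved.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "/raid/CodeLlama-34_EXTEND_VOCAB_V8_UPDATE/"  # model path from the command above
tokenizer = AutoTokenizer.from_pretrained(path, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16, device_map="auto")

inputs = tokenizer("def quicksort(arr):", return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model(**inputs, labels=inputs["input_ids"])
print(out.loss)  # expected: a finite, nonzero cross-entropy value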

hiyouga commented 9 months ago

Please update to the latest code.