meta-llama / codellama

Inference code for CodeLlama models
Other
15.96k stars 1.86k forks source link

Memory Usage Spike During Model Saving in Training Pipeline #115

Open HumzaSami00 opened 1 year ago

HumzaSami00 commented 1 year ago

I have a question regarding my fine-tuning pipeline, specifically concerning a memory usage spike when the model saves a checkpoint during a training step. This causes a sudden CUDA out-of-memory error.

I would like to provide the following information, including GPU usage logs and code snippets for reference:

GPU - Used: 12.2GB, Free: 11.5GB
GPU - Used: 12.3GB, Free: 11.4GB
GPU - Used: 12.3GB, Free: 11.4GB
GPU - Used: 22.2GB, Free: 1.5GB
GPU - Used: 18.8GB, Free: 4.8GB
GPU - Used: 18.8GB, Free: 4.8GB
GPU - Used: 18.8GB, Free: 4.8GB
GPU - Used: 22.2GB, Free: 1.5GB
GPU - Used: 23.0GB, Free: 0.7GB
GPU - Used: 23.3GB, Free: 0.4GB
GPU - Used: 23.3GB, Free: 0.4GB
GPU - Used: 23.3GB, Free: 0.4GB
GPU - Used: 23.6GB, Free: 0.1GB
GPU - Used: 19.9GB, Free: 3.8GB
GPU - Used: 19.9GB, Free: 3.8GB
GPU - Used: 22.1GB, Free: 1.6GB
import json
import torch
import pandas as pd
import datasets
from peft import LoraConfig,PeftModel
from transformers import (AutoModelForCausalLM,AutoTokenizer,TrainingArguments,BitsAndBytesConfig)
import transformers
from trl import SFTTrainer
from training_args import *
import os

import logging
import sys

# Directory where checkpoints, the log file, and TensorBoard traces land.
output_dir = "CodeLlama-7b-Instruct-HF-results/trl-trainer/Complete_dataset_training/"

# exist_ok=True makes creation idempotent and closes the check-then-create
# race of the original `os.path.exists` + `os.makedirs` pair.
if not os.path.isdir(output_dir):
    os.makedirs(output_dir, exist_ok=True)
    print(f"Directory '{output_dir}' created.")
else:
    print(f"Directory '{output_dir}' already exists.")

# Root logger: INFO and above is mirrored to stdout and to a file inside
# output_dir (created earlier in this script).
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# One shared format for every handler.
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

# Console handler (stdout).
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)

# File handler for persistent logs.
file_handler = logging.FileHandler(f'{output_dir}/trl-trainer-codellama.txt', encoding='utf-8')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# Removed: a stray `console_handler = logging.StreamHandler(...)` that was
# never given a formatter nor attached to the logger (dead code).

# DEVICE = "cuda:0" if torch.cuda.is_available() else 'cpu'

# Local path of the base checkpoint to fine-tune.
MODEL_NAME = "CodeLlama-7b-Instruct-HF/"

# Training corpus previously written with datasets.save_to_disk.
dataset = datasets.load_from_disk("../dataset/complete_overlapped_chat_format_171781/")

# Base model loaded in 8-bit with automatic device placement.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    use_safetensors=True,
    load_in_8bit=True,
    trust_remote_code=True,
    device_map='auto',
)

# Tokenizer with automatic special-token insertion disabled; the dataset
# text presumably already carries any BOS/EOS markers it needs — TODO confirm.
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME,
    add_special_tokens=False,
    add_eos_token=False,
    add_bos_token=False,
)
# LORA Configuration
# LoRA adapter configuration: rank-12 adapters on the attention and MLP
# projection layers plus the LM head.
_LORA_TARGET_MODULES = [
    "q_proj", "v_proj", "k_proj", "o_proj",
    "gate_proj", "up_proj", "down_proj", "lm_head",
]

peft_config = LoraConfig(
    r=12,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=_LORA_TARGET_MODULES,
)

# Trainer hyper-parameters.
#
# NOTE(review): the original config used save_steps=1, i.e. a checkpoint
# after *every* optimizer step. Each save temporarily gathers/copies model
# weights, which matches the reported GPU-memory spikes during saving (and
# also causes enormous disk churn). Saving every 500 steps keeps the same
# save_strategy while removing the per-step spike.
training_arguments = TrainingArguments(
    per_device_train_batch_size=8,
    gradient_accumulation_steps=4,
    optim="paged_adamw_32bit",
    learning_rate=4e-4,
    fp16=True,
    max_grad_norm=0.3,
    num_train_epochs=3,
    warmup_ratio=0.05,
    logging_steps=5,
    save_total_limit=5,       # keep only the 5 most recent checkpoints
    save_strategy="steps",
    save_steps=500,           # was 1: checkpointed on every single step
    group_by_length=True,
    output_dir=output_dir,
    report_to="tensorboard",
    save_safetensors=True,
    lr_scheduler_type="cosine",
    seed=42,
)

trainer = SFTTrainer(
    model=model,
    train_dataset=dataset,
    peft_config=peft_config,
    dataset_text_field="text",   # column of `dataset` holding the raw text
    max_seq_length=4096,
    tokenizer=tokenizer,
    args=training_arguments,
)

# BUG FIX: the original assigned `pad_token = False`, a boolean where a
# token string is expected (and the following bare `trainer.tokenizer.pad_token`
# expression was a no-op). Llama-family tokenizers ship without a pad token;
# pad with EOS so batching and group_by_length work correctly.
if trainer.tokenizer.pad_token is None:
    trainer.tokenizer.pad_token = trainer.tokenizer.eos_token

# Top-level boundary around the training loop: log any failure with its
# full traceback instead of only str(e), then fall through (script exit).
try:
    trainer.train()
except Exception:
    # logger.exception records the active traceback automatically.
    logger.exception("Training failed")
GaganHonor commented 1 year ago

Are your GPU drivers up to date?

Your code looks reasonable; still, you can try:

# Suggested variant: a smaller per-device batch with more gradient
# accumulation keeps the same effective batch size (4 * 8 = 32) while
# lowering peak activation memory per step.
training_arguments = TrainingArguments(
    per_device_train_batch_size=4,  # Reduced batch size
    gradient_accumulation_steps=8,  # Increased gradient accumulation steps
    optim="paged_adamw_32bit",
    learning_rate=4e-4,
    fp16=True,
    max_grad_norm=0.3,
    num_train_epochs=3,
    warmup_ratio=0.05,
    logging_steps=5,
    save_total_limit=5,
    save_strategy="steps",
    save_steps=1,  # NOTE(review): still checkpoints every step — likely the real source of the save-time memory spike; consider a much larger value
    group_by_length=True,
    output_dir=output_dir,
    report_to="tensorboard",
    save_safetensors=True,
    lr_scheduler_type="cosine",
    seed=42
)