pytorch / opacus

Training PyTorch models with differential privacy
https://opacus.ai
Apache License 2.0

LLM finetuning with Opacus #632

Closed: KKNakkav2 closed this issue 3 months ago

KKNakkav2 commented 4 months ago

I am fine-tuning a BERT model with the Opacus wrapper and encountered an issue inside the Opacus optimizer. Could you please advise on the next steps for resolving the error?

import torch
import torch.nn as nn
import torch.optim as optim
from transformers import BertForSequenceClassification, BertTokenizer
from datasets import load_dataset
from torch.utils.data import DataLoader
from opacus import PrivacyEngine

# Load and preprocess the SST-2 dataset
dataset = load_dataset("glue", "sst2", cache_dir="/home/krishna/fs-llm/data")
train_dataset = dataset["train"]
print(train_dataset)

# Load pre-trained BERT model and tokenizer
model = BertForSequenceClassification.from_pretrained(
    "bert-base-uncased", cache_dir="/assets/SSD/hub")
tokenizer = BertTokenizer.from_pretrained(
    "bert-base-uncased", cache_dir="/assets/SSD/hub")

# Define data loader

def collate_fn(batch):
    sentences = [b["sentence"] for b in batch]
    inputs = tokenizer(sentences, padding=True,
                       truncation=True, max_length=128, return_tensors="pt")
    inputs["labels"] = torch.tensor([b["label"] for b in batch])
    return inputs

train_loader = DataLoader(train_dataset, batch_size=32, collate_fn=collate_fn)

# Define optimizer
optimizer = optim.SGD(model.parameters(), lr=0.01)

model.train().cuda()

print(model)

# Define privacy engine
privacy_engine = PrivacyEngine()
model, optimizer, train_loader = privacy_engine.make_private(
    module=model,
    optimizer=optimizer,
    data_loader=train_loader,
    noise_multiplier=1.0,
    max_grad_norm=1.0,
)

print(model)

# Define loss function
criterion = nn.CrossEntropyLoss()

# Train the model
for epoch in range(5):  # Adjust number of epochs as needed
    model.train()
    for batch in train_loader:
        optimizer.zero_grad()
        inputs = {key: value.cuda() for key, value in batch.items()}
        outputs = model(**inputs)
        loss = criterion(outputs.logits, inputs["labels"].cuda())
        loss.backward()
        optimizer.step()
    print(f"Epoch {epoch+1} finished.")

Error:

GradSampleModule(BertForSequenceClassification(
  (bert): BertModel(
    (embeddings): BertEmbeddings(
      (word_embeddings): Embedding(30522, 768, padding_idx=0)
      (position_embeddings): Embedding(512, 768)
      (token_type_embeddings): Embedding(2, 768)
      (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
      (dropout): Dropout(p=0.1, inplace=False)
    )
    (encoder): BertEncoder(
      (layer): ModuleList(
        (0-11): 12 x BertLayer(
          (attention): BertAttention(
            (self): BertSelfAttention(
              (query): Linear(in_features=768, out_features=768, bias=True)
              (key): Linear(in_features=768, out_features=768, bias=True)
              (value): Linear(in_features=768, out_features=768, bias=True)
              (dropout): Dropout(p=0.1, inplace=False)
            )
            (output): BertSelfOutput(
              (dense): Linear(in_features=768, out_features=768, bias=True)
              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
              (dropout): Dropout(p=0.1, inplace=False)
            )
          )
          (intermediate): BertIntermediate(
            (dense): Linear(in_features=768, out_features=3072, bias=True)
            (intermediate_act_fn): GELUActivation()
          )
          (output): BertOutput(
            (dense): Linear(in_features=3072, out_features=768, bias=True)
            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
            (dropout): Dropout(p=0.1, inplace=False)
          )
        )
      )
    )
    (pooler): BertPooler(
      (dense): Linear(in_features=768, out_features=768, bias=True)
      (activation): Tanh()
    )
  )
  (dropout): Dropout(p=0.1, inplace=False)
  (classifier): Linear(in_features=768, out_features=2, bias=True)
))

Traceback (most recent call last):
  File "/home/krishna/fs-llm-temp/temp2.py", line 66, in <module>
    optimizer.step()
  File "/home/krishna/miniconda3/envs/fs-llm/lib/python3.9/site-packages/opacus/optimizers/optimizer.py", line 518, in step
    if self.pre_step():
  File "/home/krishna/miniconda3/envs/fs-llm/lib/python3.9/site-packages/opacus/optimizers/optimizer.py", line 499, in pre_step
    self.clip_and_accumulate()
  File "/home/krishna/miniconda3/envs/fs-llm/lib/python3.9/site-packages/opacus/optimizers/optimizer.py", line 404, in clip_and_accumulate
    per_sample_norms = torch.stack(per_param_norms, dim=1).norm(2, dim=1)
RuntimeError: stack expects each tensor to be equal size, but got [34] at entry 0 and [1] at entry 1
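
For anyone hitting a similar mismatch, one way to find the offending parameter is to print the shapes of the per-sample gradients that GradSampleModule attaches after a backward pass. This is a minimal diagnostic sketch (not part of the original report), run in place of optimizer.step() for a single batch:

# Diagnostic sketch: after loss.backward() on one batch, each trainable
# parameter of the GradSampleModule-wrapped model carries a `grad_sample`
# tensor whose first dimension should equal the batch size.
# A parameter whose grad_sample has a leading dimension of 1 (most likely
# the position embedding weight here) is what makes torch.stack fail in
# clip_and_accumulate.
for name, param in model.named_parameters():
    grad_sample = getattr(param, "grad_sample", None)
    if grad_sample is not None:
        print(name, tuple(grad_sample.shape))
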
scakc commented 4 months ago

Facing the exact same issue while training distilgpt2 for causal LM.

Update: Resolved for me. The position encoding (wpe) parameter's grad sampler was getting a batch size of [1] because the position ids are the same for all inputs in the batch and are stored as a single array rather than a batch_size x seq_len matrix. I simply passed the position ids explicitly as an input to the model and it worked. @KKNakkav2 you can try this and see if it works for you.
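
A minimal sketch of the workaround @scakc describes, adapted to the BERT script in the original post; it assumes the standard HuggingFace forward signature, where `position_ids` is an optional argument. Expanding the position ids to shape (batch_size, seq_len) gives the position-embedding layer a per-sample input, so its grad_sample gets a real batch dimension:

# Training step with explicit position_ids (workaround sketch).
for batch in train_loader:
    optimizer.zero_grad()
    inputs = {key: value.cuda() for key, value in batch.items()}
    batch_size, seq_len = inputs["input_ids"].shape
    # Build per-sample position ids instead of relying on the module's
    # internal buffer, which is shared across the whole batch.
    position_ids = torch.arange(seq_len, device=inputs["input_ids"].device)
    inputs["position_ids"] = position_ids.unsqueeze(0).expand(batch_size, -1)
    outputs = model(**inputs)
    loss = criterion(outputs.logits, inputs["labels"])
    loss.backward()
    optimizer.step()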

HuanyuZhang commented 3 months ago

Yeah, as @scakc mentioned, I do not think Opacus currently supports GPT-based models out of the box. You can either follow the instructions from @scakc or use Xuechen's fix (https://github.com/lxuechen/private-transformers).
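
For the distilgpt2 / causal-LM case, the same position-ids idea looks roughly like the hedged sketch below (model and tokenizer names are illustrative; the assumed forward signature is HuggingFace's GPT2LMHeadModel, which accepts `position_ids` and `labels`). It is a standalone forward/backward demo; in practice the model would be wrapped with PrivacyEngine.make_private and used inside a training loop like the one in the original post.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no pad token by default
model = AutoModelForCausalLM.from_pretrained("distilgpt2").train().cuda()

texts = ["hello world", "differential privacy for language models"]
enc = tokenizer(texts, padding=True, return_tensors="pt").to("cuda")

batch_size, seq_len = enc["input_ids"].shape
# Explicit per-sample position ids so the wpe embedding's grad sampler sees a
# (batch_size, seq_len) input rather than a single shared array.
position_ids = torch.arange(seq_len, device="cuda").unsqueeze(0).expand(batch_size, -1)

# Mask padded positions out of the loss, as is standard for causal LM labels.
labels = enc["input_ids"].clone()
labels[enc["attention_mask"] == 0] = -100

outputs = model(
    input_ids=enc["input_ids"],
    attention_mask=enc["attention_mask"],
    position_ids=position_ids,
    labels=labels,
)
outputs.loss.backward()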