pytorch / opacus

Training PyTorch models with differential privacy
https://opacus.ai
Apache License 2.0

Got error when finetuning a gpt-like model with DPSGD #592

Closed xiehuanyi closed 8 months ago

xiehuanyi commented 1 year ago

I am trying to fine-tune a GPT-like model with DP-SGD to protect the privacy of the training data. However, I get an error when applying the privacy engine (the same code works fine with plain torch.optim.SGD). The error is shown below.

Traceback (most recent call last):
  File "main.py", line 50, in <module>
    main()
  File "main.py", line 47, in main
    args=args
  File "/code/src/finetune.py", line 19, in finetune
    optimizer.step()
  File "/opt/conda/lib/python3.7/site-packages/opacus/optimizers/optimizer.py", line 513, in step
    if self.pre_step():
  File "/opt/conda/lib/python3.7/site-packages/opacus/optimizers/optimizer.py", line 494, in pre_step
    self.clip_and_accumulate()
  File "/opt/conda/lib/python3.7/site-packages/opacus/optimizers/optimizer.py", line 404, in clip_and_accumulate
    per_sample_norms = torch.stack(per_param_norms, dim=1).norm(2, dim=1)
RuntimeError: stack expects each tensor to be equal size, but got [32] at entry 0 and [1] at entry 234
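
From the traceback, the failing line stacks one per-sample gradient-norm vector per parameter, so it assumes every parameter's grad_sample shares the same leading batch dimension. A minimal sketch of that assumption (illustrative only, not Opacus internals verbatim):

import torch

# Toy illustration: per-sample gradient norms are computed per parameter and
# then stacked, so every parameter's grad_sample must have the same leading
# (batch) dimension.
batch_size = 32
grad_samples = [torch.randn(batch_size, 10), torch.randn(batch_size, 5, 5)]
per_param_norms = [g.reshape(len(g), -1).norm(2, dim=-1) for g in grad_samples]
per_sample_norms = torch.stack(per_param_norms, dim=1).norm(2, dim=1)  # OK: every norm vector is [32]

# If one parameter reports a grad_sample with batch dimension 1 (as entry 234
# does in the error above), the stack fails with
# "stack expects each tensor to be equal size".
grad_samples.append(torch.randn(1, 3))
# torch.stack([g.reshape(len(g), -1).norm(2, dim=-1) for g in grad_samples], dim=1)  # RuntimeError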

And here is my code.

from tqdm import tqdm
import numpy as np

def finetune(model, train_dataloader, optimizer, device, privacy_engine, tokenizer, eval_dataloader, args):
    model = model.to(device)
    steps = 0
    for epoch in range(1, args.epochs+1):
        losses = []
        for step, batch in enumerate(tqdm(train_dataloader)):
            optimizer.zero_grad()
            inputs = {k: v.to(device) for k, v in batch.items()}
            inputs['labels'] = inputs['input_ids']
            outputs = model(**inputs) # output = loss, logits, hidden_states, attentions
            loss = outputs.loss
            loss.backward()
            losses.append(loss.item())
            optimizer.step()
            steps += 1

            if steps > 0 and steps % args.logging_intervals == 0:
                train_loss = np.mean(losses)
                eps = privacy_engine.get_privacy_spent(args.delta)
#                 bleu = eval(model, tokenizer, eval_dataloader, device)
                print(
                f"Epoch: {epoch} | "
                f"Step: {step} | " 
                f"Train loss: {train_loss:.3f} | "
#                 f"BLEU: {bleu:.2f} |"
#                 f"Best alpha: {best_alpha:.2f} | "
                f"ɛ: {eps:.2f}"
                )

from src.args import parse_args
from src.dataset import get_dataloader
from src.models import get_model_and_tokenizer
from src.finetune import finetune
from torch.optim import SGD
import torch
from opacus import PrivacyEngine

def main():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f"Using device: {device}")
    args = parse_args()
    model, tokenizer = get_model_and_tokenizer(args)
    print("model and tokenizer loaded!")
    train_loader, eval_loader = get_dataloader(args, tokenizer)
    print("train and evaluate dataloader loaded!")
    optimizer = SGD(model.parameters(), lr=args.lr)
    print(f"optimizer: {type(optimizer)}")
    # Privacy engine
    print(f"Initializing privacy engine...")
    privacy_engine = PrivacyEngine()
    model, optimizer, data_loader = privacy_engine.make_private(
                                                                module=model,
                                                                optimizer=optimizer,
                                                                data_loader=train_loader,
                                                                noise_multiplier=1.1,
                                                                max_grad_norm=1.0,
                                                                )

    print("Privacy engine loaded!")
    print("starting finetune!")
    finetune(
        model=model,
        train_dataloader=train_loader,
        optimizer=optimizer,
        device=device, 
        privacy_engine=privacy_engine,
        tokenizer=tokenizer,
        eval_dataloader=eval_loader,
        args=args
        )

main()
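
(One detail in the snippet above that may or may not matter here: make_private also returns a wrapped data loader, and the Opacus tutorials run the training loop over that returned loader rather than the original one. A minimal sketch of that pattern, reusing the variable names from the code above:)

    model, optimizer, train_loader = privacy_engine.make_private(
        module=model,
        optimizer=optimizer,
        data_loader=train_loader,
        noise_multiplier=1.1,
        max_grad_norm=1.0,
    )
    # ...and then iterate over this returned train_loader inside finetune()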

And my environment is shown below.

Package                  Version
------------------------ ----------
accelerate               0.20.3
aiohttp                  3.8.3
aiosignal                1.3.1
anyio                    3.6.2
argon2-cffi              21.3.0
argon2-cffi-bindings     21.2.0
async-timeout            4.0.2
asynctest                0.13.0
attrs                    21.4.0
Babel                    2.11.0
backcall                 0.2.0
beautifulsoup4           4.10.0
bleach                   5.0.0
brotlipy                 0.7.0
certifi                  2021.10.8
cffi                     1.14.6
chardet                  4.0.0
charset-normalizer       2.1.1
conda                    4.10.3
conda-build              3.21.5
conda-package-handling   1.7.3
cryptography             35.0.0
datasets                 2.7.1
debugpy                  1.6.0
decorator                5.1.0
defusedxml               0.7.1
dill                     0.3.6
dnspython                2.1.0
elastic-transport        8.4.0
elasticsearch            8.5.3
entrypoints              0.4
evaluate                 0.4.0
faiss-gpu                1.7.2
fastjsonschema           2.15.3
filelock                 3.3.1
frozenlist               1.3.3
fsspec                   2022.11.0
glob2                    0.7
huggingface-hub          0.11.1
idna                     2.10
importlib-metadata       4.11.3
importlib-resources      5.7.1
ipykernel                6.13.0
ipython                  7.29.0
ipython-genutils         0.2.0
jedi                     0.18.0
jieba                    0.42.1
Jinja2                   3.1.1
json5                    0.9.6
jsonschema               4.4.0
jupyter-client           7.3.0
jupyter-core             4.10.0
jupyter-server           1.23.3
jupyterlab               3.0.0
jupyterlab-pygments      0.2.2
jupyterlab-server        2.16.5
libarchive-c             2.9
MarkupSafe               2.0.1
matplotlib-inline        0.1.2
mistune                  0.8.4
mkl-fft                  1.3.1
mkl-random               1.2.2
mkl-service              2.4.0
multidict                6.0.3
multiprocess             0.70.14
nbclassic                1.0.0
nbclient                 0.6.0
nbconvert                6.5.0
nbformat                 5.3.0
nest-asyncio             1.5.5
nlp                      0.4.0
notebook                 6.4.11
notebook-shim            0.2.3
numpy                    1.21.2
nvidia-cublas-cu11       11.10.3.66
nvidia-cuda-nvrtc-cu11   11.7.99
nvidia-cuda-runtime-cu11 11.7.99
nvidia-cudnn-cu11        8.5.0.96
olefile                  0.46
opacus                   1.4.0
opt-einsum               3.3.0
packaging                21.3
pandas                   1.3.5
pandocfilters            1.5.0
parso                    0.8.2
peft                     0.3.0
pexpect                  4.8.0
pickleshare              0.7.5
Pillow                   8.4.0
pip                      21.0.1
pkginfo                  1.7.1
prometheus-client        0.14.1
prompt-toolkit           3.0.20
psutil                   5.8.0
ptyprocess               0.7.0
pyarrow                  10.0.1
pycosat                  0.6.3
pycparser                2.20
Pygments                 2.10.0
pyOpenSSL                20.0.1
pyparsing                3.0.8
pyrsistent               0.18.1
PySocks                  1.7.1
python-dateutil          2.8.2
python-etcd              0.4.5
pytz                     2021.3
PyYAML                   6.0
pyzmq                    22.3.0
regex                    2022.10.31
requests                 2.28.1
responses                0.18.0
ruamel-yaml-conda        0.15.100
scipy                    1.7.3
Send2Trash               1.8.0
sentencepiece            0.1.99
setuptools               58.0.4
six                      1.16.0
sniffio                  1.3.0
soupsieve                2.2.1
terminado                0.13.3
tinycss2                 1.1.1
tokenizers               0.13.2
torch                    1.13.1
torchelastic             0.2.0
torchtext                0.11.0
torchvision              0.11.1
tornado                  6.1
tqdm                     4.64.1
traitlets                5.1.0
transformers             4.25.1
typing-extensions        3.10.0.2
urllib3                  1.26.6
wcwidth                  0.2.5
webencodings             0.5.1
websocket-client         1.4.2
wheel                    0.36.2
xxhash                   3.1.0
yarl                     1.8.2
zipp                     3.8.0

Could anyone help me with this?

HuanyuZhang commented 1 year ago

Could you please share your model architecture? Have you checked whether this model is compatible with Opacus (https://opacus.ai/tutorials/guide_to_module_validator)?
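
For reference, the compatibility check in the linked guide boils down to running Opacus' ModuleValidator over the model before wrapping it. A minimal sketch with a toy model (the GPT-like model would go in its place):

import torch.nn as nn
from opacus.validators import ModuleValidator

# Toy model containing a BatchNorm layer, which Opacus rejects
model = nn.Sequential(nn.Linear(4, 4), nn.BatchNorm1d(4))

# List incompatible modules without raising
errors = ModuleValidator.validate(model, strict=False)
print(errors)

# Replace known-incompatible modules with DP-friendly equivalents
# (e.g. BatchNorm -> GroupNorm), then re-check
model = ModuleValidator.fix(model)
ModuleValidator.validate(model, strict=True)  # raises if anything is still incompatible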

HuanyuZhang commented 8 months ago

Closing the issue due to no response. Feel free to re-open if needed.

ShayanShamsi commented 8 months ago

Hello @HuanyuZhang. I am facing an issue very similar to the one @xiehuanyi described, and I hope you can help me out. I am using the dp-transformers library to add differential privacy to a text classification task with bert-tiny.

Here is the code:

import datasets
import dp_transformers
import transformers
import sys

from dataclasses import dataclass, field, asdict
from peft import get_peft_model, LoraConfig

@dataclass
class ModelArguments:
    model_name: str = field(default="gpt2", metadata={
        "help": "Model name in HuggingFace, e.g. 'gpt2'"
    })
    sequence_len: int = field(default=128, metadata={
        "help": "Maximum sequence length"
    })

@dataclass
class LoraArguments:
    enable_lora: bool = field(default=False, metadata={
        "help": "Whether to enable LoRA"
    })
    lora_dim: int = field(default=8, metadata={
        "help": "LoRA dimension"
    })
    lora_alpha: int = field(default=8, metadata={
        "help": "LoRA alpha"
    })
    lora_dropout: float = field(default=0.0, metadata={
        "help": "LoRA dropout"
    })

    def as_peft_config(self) -> LoraConfig:
        if not self.enable_lora:
            raise ValueError("LoRA is not enabled, cannot convert to LoRA config")
        params = asdict(self)
        params.pop("enable_lora")
        params["r"] = params.pop("lora_dim")
        return LoraConfig(**params)

@dataclass
class Arguments:
    train: dp_transformers.TrainingArguments
    privacy: dp_transformers.PrivacyArguments
    model: ModelArguments
    lora: LoraArguments

def main(args: Arguments):
    transformers.set_seed(args.train.seed)

    # Load model
    model = transformers.BertForSequenceClassification.from_pretrained(args.model.model_name)

    # Load data
    dataset = datasets.load_dataset('glue', 'sst2', split="train").train_test_split(0.02, seed=args.train.seed)

    # Load tokenizer
    tokenizer = transformers.BertTokenizer.from_pretrained(args.model.model_name)

    dataset = dataset.map(
        lambda examples: tokenizer(examples['sentence'], padding="max_length", truncation=True, max_length=args.model.sequence_len),
        desc="tokenizing dataset", remove_columns=['sentence', 'idx']
    )
    dataset = dataset.rename_column("label", "labels")  # rename_column is not in-place

    model = model.cuda()
    model.train()

    data_collator = transformers.DataCollatorWithPadding(tokenizer, padding="longest")

    trainer = dp_transformers.dp_utils.OpacusDPTrainer(
        args=train_args,
        model=model,
        train_dataset=dataset['train'],
        eval_dataset=dataset['test'],
        data_collator=data_collator,
        privacy_args=privacy_args,
    )

    try:
        trainer.train()
    finally:
        eps_prv = trainer.get_prv_epsilon()
        eps_rdp = trainer.get_rdp_epsilon()
        trainer.log({
            "final_epsilon_prv": eps_prv,
            "final_epsilon_rdp": eps_rdp
        })

train_args=dp_transformers.TrainingArguments(
    output_dir="scratch",
    per_device_train_batch_size=64,
    gradient_accumulation_steps=1,
    evaluation_strategy="steps",
    eval_steps=45,
    per_device_eval_batch_size=64,
    eval_accumulation_steps=1,
    seed=42,
    weight_decay=0.01,
    remove_unused_columns=False,
    num_train_epochs=1,
    logging_steps=5,
    max_grad_norm=0,
    lr_scheduler_type="constant",
    learning_rate=3e-4,
    label_names=["labels"]
)
privacy_args=dp_transformers.PrivacyArguments(
    per_sample_max_grad_norm=1.0,
    target_epsilon=8
)
model_args=ModelArguments(
    model_name="prajjwal1/bert-tiny",
    sequence_len=512
)
lora_args=LoraArguments(
    enable_lora=True,
    lora_dim=4,
    lora_alpha=32,
    lora_dropout=0.0
)
main(Arguments(train=train_args, privacy=privacy_args, model=model_args, lora=lora_args))

Here is the stack trace showing the error:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-24-7cff3dd13869> in <cell line: 33>()
     31     lora_dropout=0.0
     32 )
---> 33 main(Arguments(train=train_args, privacy=privacy_args, model=model_args, lora=lora_args))

<ipython-input-23-9c34a7138e6f> in main(args)
     87 
     88     try:
---> 89         trainer.train()
     90     finally:
     91         eps_prv = trainer.get_prv_epsilon()

/usr/local/lib/python3.10/dist-packages/transformers/trainer.py in train(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)
   1643             self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size
   1644         )
-> 1645         return inner_training_loop(
   1646             args=args,
   1647             resume_from_checkpoint=resume_from_checkpoint,

/usr/local/lib/python3.10/dist-packages/transformers/trainer.py in _inner_training_loop(self, batch_size, args, resume_from_checkpoint, trial, ignore_keys_for_eval)
   1996                         optimizer_was_run = scale_before <= scale_after
   1997                     else:
-> 1998                         self.optimizer.step()
   1999                         optimizer_was_run = not self.accelerator.optimizer_step_was_skipped
   2000 

/usr/local/lib/python3.10/dist-packages/accelerate/optimizer.py in step(self, closure)
    143                 self._accelerate_step_called = False
    144             else:
--> 145                 self.optimizer.step(closure)
    146 
    147     def _switch_parameters(self, parameters_map):

/usr/local/lib/python3.10/dist-packages/torch/optim/lr_scheduler.py in wrapper(*args, **kwargs)
     66                 instance._step_count += 1
     67                 wrapped = func.__get__(instance, cls)
---> 68                 return wrapped(*args, **kwargs)
     69 
     70             # Note that the returned function here is no longer a bound method,

/usr/local/lib/python3.10/dist-packages/opacus/optimizers/optimizer.py in step(self, closure)
    511                 closure()
    512 
--> 513         if self.pre_step():
    514             return self.original_optimizer.step()
    515         else:

/usr/local/lib/python3.10/dist-packages/opacus/optimizers/optimizer.py in pre_step(self, closure)
    492                 returns the loss. Optional for most optimizers.
    493         """
--> 494         self.clip_and_accumulate()
    495         if self._check_skip_next_step():
    496             self._is_last_step_skipped = True

/usr/local/lib/python3.10/dist-packages/opacus/optimizers/optimizer.py in clip_and_accumulate(self)
    402                 g.reshape(len(g), -1).norm(2, dim=-1) for g in self.grad_samples
    403             ]
--> 404             per_sample_norms = torch.stack(per_param_norms, dim=1).norm(2, dim=1)
    405             per_sample_clip_factor = (
    406                 self.max_grad_norm / (per_sample_norms + 1e-6)

RuntimeError: stack expects each tensor to be equal size, but got [64] at entry 0 and [1] at entry 1
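
In case it helps narrow this down, one way to see which parameter ends up with the mismatched [1] batch dimension is to inspect the grad_sample attribute that Opacus attaches to each parameter after the backward pass. A rough sketch (assuming model is the privacy-engine-wrapped model and a backward pass has already run):

# Print the per-sample gradient shape of every parameter; any entry whose
# leading dimension is not the batch size points to the offending parameter.
for name, p in model.named_parameters():
    gs = getattr(p, "grad_sample", None)
    if gs is not None:
        print(name, gs.shape if hasattr(gs, "shape") else [g.shape for g in gs])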