pytorch / opacus

Training PyTorch models with differential privacy
https://opacus.ai
Apache License 2.0

ValueError: Per sample gradient is not initialized. Not updated in backward pass? #469

Closed · long21wt closed this issue 2 years ago

long21wt commented 2 years ago

🐛 Bug

Hi, I want to train M2M100 from scratch on the WMT19 dataset using the Opacus PrivacyEngine. However, I encounter this bug. The code below is just a very quick prototype for training the model with the Hugging Face ecosystem.

To Reproduce

Here is my code:

import torch
import random
import argparse
import numpy as np
from tqdm.auto import tqdm
from accelerate import Accelerator
from transformers import M2M100Tokenizer, M2M100Config, M2M100ForConditionalGeneration, DataCollatorForSeq2Seq
from torch.utils.data import DataLoader
from datasets import load_dataset, load_metric
from transformers import get_scheduler
from torch.optim import AdamW
from opacus import PrivacyEngine
from opacus.validators import ModuleValidator

random.seed(666)
np.random.seed(666)
torch.manual_seed(666)
torch.cuda.manual_seed_all(666)
torch.backends.cudnn.deterministic = True

parser = argparse.ArgumentParser(description='Train M2M100 on WMT19, optionally with differential privacy.')
# Note: argparse's type=bool treats any non-empty string (even "False") as True,
# so a store_true flag is used instead.
parser.add_argument("-dp", "--diff_priv", action="store_true")
parser.add_argument("-e", "--epsilon", default=1, type=int)

args = parser.parse_args()
print(args)
raw_datasets = load_dataset('wmt19', "cs-en")

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="cs", tgt_lang="en")
config = M2M100Config.from_pretrained("facebook/m2m100_418M")
model = M2M100ForConditionalGeneration(config)

max_input_length = 256
max_target_length = 256

def preprocess_function(examples):
    inputs = [ex["cs"] for ex in examples["translation"]]
    targets = [ex["en"] for ex in examples["translation"]]
    model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True)

    # Set up the tokenizer for targets
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=max_target_length, truncation=True)

    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

tokenized_datasets = raw_datasets.map(
    preprocess_function,
    batched=True,
    remove_columns=raw_datasets["train"].column_names,
)

tokenized_datasets.set_format("torch")

data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)

metric = load_metric('sacrebleu')

def postprocess(predictions, labels):
    predictions = predictions.cpu().numpy()
    labels = labels.cpu().numpy()

    decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)

    # Replace -100 in the labels as we can't decode them.
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

    # Some simple post-processing
    decoded_preds = [pred.strip() for pred in decoded_preds]
    decoded_labels = [[label.strip()] for label in decoded_labels]
    return decoded_preds, decoded_labels

train_dataloader = DataLoader(
    tokenized_datasets["train"],
    shuffle=True,
    collate_fn=data_collator,
    batch_size=8,
)

eval_dataloader = DataLoader(
    tokenized_datasets["validation"], collate_fn=data_collator, batch_size=8
)

# Replace modules that Opacus cannot compute per-sample gradients for (e.g. BatchNorm)
model = ModuleValidator.fix(model)

optimizer = AdamW(model.parameters(), lr=2e-5)

if args.diff_priv:
    privacy_engine = PrivacyEngine()
    # Wrap model, optimizer and dataloader so DP-SGD (per-sample gradient clipping
    # plus noise) is applied, calibrated to the target (epsilon, delta)
    model, optimizer, train_dataloader = privacy_engine.make_private_with_epsilon(
        module=model,
        optimizer=optimizer,
        data_loader=train_dataloader,
        target_delta=1e-5,
        target_epsilon=args.epsilon,
        epochs=1,
        max_grad_norm=0.1
    )
    print("With private engine")

accelerator = Accelerator()
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
    model, optimizer, train_dataloader, eval_dataloader
)

num_train_epochs = 1
num_update_steps_per_epoch = len(train_dataloader)
num_training_steps = num_train_epochs * num_update_steps_per_epoch

lr_scheduler = get_scheduler(
    "linear",
    optimizer=optimizer,
    num_warmup_steps=0,
    num_training_steps=num_training_steps,
)

progress_bar = tqdm(range(num_training_steps))

for epoch in range(num_train_epochs):
    # Training
    model.train()
    for batch in train_dataloader:
        outputs = model(**batch)
        loss = outputs.loss
        accelerator.backward(loss)

        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()
        progress_bar.update(1)

    # Evaluation
    model.eval()
    for batch in tqdm(eval_dataloader):
        with torch.no_grad():
            generated_tokens = accelerator.unwrap_model(model).generate(
                batch["input_ids"],
                attention_mask=batch["attention_mask"],
                max_length=256,
                forced_bos_token_id=tokenizer.get_lang_id("en")
            )
        labels = batch["labels"]

        # Necessary to pad predictions and labels for being gathered
        generated_tokens = accelerator.pad_across_processes(
            generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
        )
        labels = accelerator.pad_across_processes(labels, dim=1, pad_index=-100)

        predictions_gathered = accelerator.gather(generated_tokens)
        labels_gathered = accelerator.gather(labels)

        decoded_preds, decoded_labels = postprocess(predictions_gathered, labels_gathered)
        metric.add_batch(predictions=decoded_preds, references=decoded_labels)

    results = metric.compute()
    print(f"epoch {epoch}, BLEU score: {results['score']:.2f}")

    # Save and upload
    accelerator.wait_for_everyone()
    unwrapped_model = accelerator.unwrap_model(model)
    if args.diff_priv:
        unwrapped_model.save_pretrained(f"m2m100_from_scratch_wmt19_private_{args.epsilon}epsilon", save_function=accelerator.save)
        if accelerator.is_main_process:
            tokenizer.save_pretrained(f"m2m100_from_scratch_wmt19_tokenizer_private_{args.epsilon}epsilon")
    else:
        unwrapped_model.save_pretrained("m2m100_from_scratch_wmt19", save_function=accelerator.save)
        if accelerator.is_main_process:
            tokenizer.save_pretrained("m2m100_from_scratch_wmt19_tokenizer")

Environment

long21wt commented 2 years ago

The issue comes directly from the privacy_engine.make_private_with_epsilon() step.
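
For reference, the wrapping step in isolation looks roughly like this (a minimal sketch with a toy linear model and random data standing in for M2M100/WMT19; the toy_* names are purely illustrative):

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from opacus import PrivacyEngine

toy_model = nn.Linear(10, 2)
toy_optimizer = torch.optim.SGD(toy_model.parameters(), lr=0.1)
toy_loader = DataLoader(
    TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,))),
    batch_size=8,
)

privacy_engine = PrivacyEngine()
toy_model, toy_optimizer, toy_loader = privacy_engine.make_private_with_epsilon(
    module=toy_model,
    optimizer=toy_optimizer,
    data_loader=toy_loader,
    target_epsilon=1.0,
    target_delta=1e-5,
    epochs=1,
    max_grad_norm=0.1,
)
# Opacus returns wrapped objects; the wrapped model is what computes per-sample gradients
print(type(toy_model), type(toy_optimizer), type(toy_loader))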

ashkan-software commented 2 years ago

Hello,

Thank you for flagging this!

Could you please also provide reproducible code using our template? (I want to see the error stack trace you are getting.)

long21wt commented 2 years ago

Thanks, it seems to be a problem with either the dependencies or the Python version. On Colab with Python 3.7.13, the privacy engine works well. I will try a different environment setup on my machine.
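
In case it helps pin down the environment difference, here is a quick sketch for dumping the relevant library versions on both machines (assuming the usual __version__ attributes are present):

# Quick version dump to compare the Colab and cluster environments
import sys
import torch, opacus, transformers, accelerate, datasets

print("python      :", sys.version)
print("torch       :", torch.__version__)
print("opacus      :", opacus.__version__)
print("transformers:", transformers.__version__)
print("accelerate  :", accelerate.__version__)
print("datasets    :", datasets.__version__)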

long21wt commented 2 years ago

Hi, on Colab the privacy engine works well: https://colab.research.google.com/drive/1TOELSJQ7OyOGc55o32JZz57rCwPHGubo
But I cannot run it on my cluster machine; I get the same error there. Here is the detailed log:

08/14/2022 12:08:00:WARNING:Reusing dataset wmt19 (/storage/ukp/work/vu/.cache/huggingface/datasets/wmt19/cs-en/1.0.0/aeadcbe9f1cbf9969e603239d33d3e43670cf250c1158edf74f5f6e74d4f21d0)
Namespace(diff_priv=True, epsilon=1)
100%|██████████| 7271/7271 [38:29<00:00,  3.15ba/s]

  0%|          | 0/3 [00:00<?, ?ba/s]
 33%|███▎      | 1/3 [00:00<00:00,  2.40ba/s]
 67%|██████▋   | 2/3 [00:00<00:00,  2.33ba/s]
100%|██████████| 3/3 [00:01<00:00,  2.38ba/s]
100%|██████████| 3/3 [00:01<00:00,  2.37ba/s]
/storage/ukp/work/vu/miniconda/envs/py37/lib/python3.7/site-packages/opacus/privacy_engine.py:134: UserWarning: Secure RNG turned off. This is perfectly fine for experimentation as it allows for much faster training performance, but remember to turn it on and retrain one last time before production with ``secure_mode`` turned on.
  "Secure RNG turned off. This is perfectly fine for experimentation as it allows "
/storage/ukp/work/vu/miniconda/envs/py37/lib/python3.7/site-packages/opacus/accountants/analysis/rdp.py:333: UserWarning: Optimal order is the largest alpha. Please consider expanding the range of alphas to get a tighter privacy bound.
  f"Optimal order is the {extreme} alpha. Please consider expanding the range of alphas to get a tighter privacy bound."
With private engine

  0%|          | 0/908836 [00:00<?, ?it/s]/storage/ukp/work/vu/miniconda/envs/py37/lib/python3.7/site-packages/torch/nn/modules/module.py:1053: UserWarning: Using a non-full backward hook when the forward contains multiple autograd Nodes is deprecated and will be removed in future versions. This hook will be missing some grad_input. Please use register_full_backward_hook to get the documented behavior.
  warnings.warn("Using a non-full backward hook when the forward contains multiple autograd Nodes "
/storage/ukp/work/vu/miniconda/envs/py37/lib/python3.7/site-packages/torch/nn/modules/module.py:1018: UserWarning: Using non-full backward hooks on a Module that does not return a single Tensor or a tuple of Tensors is deprecated and will be removed in future versions. This hook will be missing some of the grad_output. Please use register_full_backward_hook to get the documented behavior.
  warnings.warn("Using non-full backward hooks on a Module that does not return a "
Traceback (most recent call last):
  File "private_mt.py", line 133, in <module>
    optimizer.step()
  File "/storage/ukp/work/vu/miniconda/envs/py37/lib/python3.7/site-packages/torch/optim/lr_scheduler.py", line 65, in wrapper
    return wrapped(*args, **kwargs)
  File "/storage/ukp/work/vu/miniconda/envs/py37/lib/python3.7/site-packages/accelerate/optimizer.py", line 140, in step
    self.optimizer.step(closure)
  File "/storage/ukp/work/vu/miniconda/envs/py37/lib/python3.7/site-packages/opacus/optimizers/optimizer.py", line 509, in step
    if self.pre_step():
  File "/storage/ukp/work/vu/miniconda/envs/py37/lib/python3.7/site-packages/opacus/optimizers/optimizer.py", line 490, in pre_step
    self.clip_and_accumulate()
  File "/storage/ukp/work/vu/miniconda/envs/py37/lib/python3.7/site-packages/opacus/optimizers/optimizer.py", line 397, in clip_and_accumulate
    g.view(len(g), -1).norm(2, dim=-1) for g in self.grad_samples
  File "/storage/ukp/work/vu/miniconda/envs/py37/lib/python3.7/site-packages/opacus/optimizers/optimizer.py", line 344, in grad_samples
    ret.append(_get_flat_grad_sample(p))
  File "/storage/ukp/work/vu/miniconda/envs/py37/lib/python3.7/site-packages/opacus/optimizers/optimizer.py", line 200, in _get_flat_grad_sample
    "Per sample gradient is not initialized. Not updated in backward pass?"
ValueError: Per sample gradient is not initialized. Not updated in backward pass?
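
One way to narrow this down is a small diagnostic (a sketch, not part of the original script; report_missing_grad_samples is a made-up helper) that lists which trainable parameters never received the grad_sample attribute Opacus is supposed to populate during the backward pass:

# Diagnostic sketch: call right after the backward pass, before optimizer.step(),
# to see which trainable parameters have no per-sample gradient attached.
def report_missing_grad_samples(model):
    missing = [
        name
        for name, p in model.named_parameters()
        if p.requires_grad and getattr(p, "grad_sample", None) is None
    ]
    print(f"{len(missing)} trainable parameter(s) without grad_sample")
    for name in missing[:10]:  # print only the first few names
        print("  ", name)

# e.g. inside the training loop:
#   accelerator.backward(loss)
#   report_missing_grad_samples(model)
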
long21wt commented 2 years ago

My environment information:


Collecting environment information...
PyTorch version: 1.12.1+cu113
Is debug build: False
CUDA used to build PyTorch: 11.3
ROCM used to build PyTorch: N/A

OS: CentOS Linux release 7.9.2009 (Core) (x86_64)
GCC version: (GCC) 4.8.5 20150623 (Red Hat 4.8.5-44)
Clang version: Could not collect
CMake version: Could not collect
Libc version: glibc-2.17

Python version: 3.7.13 (default, Mar 29 2022, 02:18:16)  [GCC 7.5.0] (64-bit runtime)
Python platform: Linux-3.10.0-1160.71.1.el7.x86_64-x86_64-with-centos-7.9.2009-Core
Is CUDA available: True
CUDA runtime version: Could not collect
GPU models and configuration: GPU 0: Tesla V100-PCIE-32GB
Nvidia driver version: 515.48.07
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True

Versions of relevant libraries:
[pip3] mypy-extensions==0.4.3
[pip3] numpy==1.21.6
[pip3] numpydoc==1.2
[pip3] torch==1.12.1+cu113
[pip3] torchaudio==0.12.1+cu113
[pip3] torchvision==0.13.1+cu113
[conda] blas                      1.0                         mkl  
[conda] mkl                       2021.4.0           h06a4308_640  
[conda] mkl-service               2.4.0            py37h7f8727e_0  
[conda] mkl_fft                   1.3.1            py37hd3c417c_0  
[conda] mkl_random                1.2.2            py37h51133e4_0  
[conda] numpy                     1.21.6                   pypi_0    pypi
[conda] numpydoc                  1.2                pyhd3eb1b0_0  
[conda] torch                     1.12.1+cu113             pypi_0    pypi
[conda] torchaudio                0.12.1+cu113             pypi_0    pypi
[conda] torchvision               0.13.1+cu113             pypi_0    pypi

long21wt commented 2 years ago

I'll close this issue because it is caused by a conflict between accelerate and opacus.
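
For anyone who hits the same error, a possible workaround is to drop accelerator.prepare() and drive the Opacus-wrapped objects directly. This is a sketch under a single-GPU assumption, based on the training loop above, not an official recommendation:

# Workaround sketch: skip accelerator.prepare() and run the Opacus-wrapped
# model/optimizer/train_dataloader (from make_private_with_epsilon above) directly.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

model.train()
for batch in train_dataloader:
    batch = {k: v.to(device) for k, v in batch.items()}
    outputs = model(**batch)
    loss = outputs.loss
    loss.backward()       # plain backward instead of accelerator.backward(loss)

    optimizer.step()      # DPOptimizer clips per-sample gradients and adds noise here
    lr_scheduler.step()
    optimizer.zero_grad()

This gives up the multi-GPU handling that accelerate was providing, but keeps the DP-SGD path intact.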