
BERT Model Tuning Magic From Kaggle Solutions #28

Open osuossu8 opened 1 year ago

osuossu8 commented 1 year ago

Re-initialize the last encoder layers

https://www.kaggle.com/competitions/commonlitreadabilityprize/discussion/257302

# inside the model's __init__: load the pretrained backbone
self.model = AutoModel.from_pretrained(model_name, config=self.config)

def _init_weights(self, module):
    # same initialization scheme Hugging Face uses when pretraining BERT
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)

# re-initialize the last `reinit_layers` encoder layers before fine-tuning
if self.cfg.reinit_layers > 0:
    layers = self.model.encoder.layer[-self.cfg.reinit_layers:]
    for layer in layers:
        for module in layer.modules():
            self._init_weights(module)
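
For context, a minimal self-contained sketch of how these pieces fit together in a custom model. The class name, the regression head, and the mean pooling are illustrative assumptions, not the solution's exact architecture:

import torch.nn as nn
from transformers import AutoConfig, AutoModel

class ReinitModel(nn.Module):
    """Pretrained backbone + small head; the last `reinit_layers` encoder layers are re-initialized."""

    def __init__(self, model_name='roberta-base', reinit_layers=2):
        super().__init__()
        self.config = AutoConfig.from_pretrained(model_name)
        self.model = AutoModel.from_pretrained(model_name, config=self.config)
        self.head = nn.Linear(self.config.hidden_size, 1)

        self._init_weights(self.head)
        if reinit_layers > 0:
            for layer in self.model.encoder.layer[-reinit_layers:]:
                for module in layer.modules():
                    self._init_weights(module)

    def _init_weights(self, module):
        # same scheme as the snippet above
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def forward(self, input_ids, attention_mask):
        hidden = self.model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        return self.head(hidden.mean(dim=1))  # simple mean pooling over tokens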
osuossu8 commented 1 year ago

Evaluate every X steps (not every epoch)

https://www.kaggle.com/competitions/commonlitreadabilityprize/discussion/257844

def train_one_epoch(model, optimizer, scheduler, dataloader, valid_loader, device, epoch, best_score, valid_labels):
    model.train()
    for step, data in enumerate(dataloader):
        # ... usual forward / backward / optimizer.step() / scheduler.step() omitted ...
        # run validation every CFG.eval_freq steps instead of once per epoch
        if (step > 0) and (step % CFG.eval_freq == 0):
            valid_epoch_loss, pred = valid_one_epoch(model, valid_loader, device, epoch)
            # valid_one_epoch puts the model in eval mode, so switch back to training
            model.train()
    return epoch_loss, valid_epoch_loss, pred, best_score
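
For reference, a self-contained sketch of step-wise evaluation with best-score tracking. The batch keys, the regression-style criterion, and eval_freq below are assumptions for illustration, not the solution's actual code:

import numpy as np
import torch

def evaluate(model, valid_loader, criterion, device):
    model.eval()
    losses, preds = [], []
    with torch.no_grad():
        for batch in valid_loader:
            ids = batch['input_ids'].to(device)
            mask = batch['attention_mask'].to(device)
            target = batch['target'].to(device)
            output = model(ids, mask)
            losses.append(criterion(output, target).item())
            preds.append(output.cpu().numpy())
    return float(np.mean(losses)), np.concatenate(preds)

def train_one_epoch(model, optimizer, scheduler, criterion, train_loader,
                    valid_loader, device, best_score, eval_freq=100):
    model.train()
    for step, batch in enumerate(train_loader):
        ids = batch['input_ids'].to(device)
        mask = batch['attention_mask'].to(device)
        target = batch['target'].to(device)

        optimizer.zero_grad()
        loss = criterion(model(ids, mask), target)
        loss.backward()
        optimizer.step()
        scheduler.step()

        # evaluate every `eval_freq` steps and keep the best checkpoint
        if step > 0 and step % eval_freq == 0:
            valid_loss, _ = evaluate(model, valid_loader, criterion, device)
            if valid_loss < best_score:
                best_score = valid_loss
                torch.save(model.state_dict(), 'best_model.pth')
            model.train()  # evaluate() left the model in eval mode
    return best_score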
osuossu8 commented 1 year ago

gradient clipping

https://github.com/TakoiHirokazu/kaggle_commonLit_readability_prize/blob/2e2f0028737948d9c30be5908cfc05eb7b1fb531/exp/ex507.py#L250

pytorch

for i, d in enumerate(train_loader):
    # input_ids, mask and target come from the batch `d` (unpacking omitted here)
    optimizer.zero_grad()
    output, _ = model(input_ids, mask)
    loss = criterion(output, target)
    loss.backward()
    # clip the gradient norm to 1.0 before the optimizer step
    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
    optimizer.step()
    scheduler.step()

tensorflow

import tensorflow_addons as tfa

optimizer = tfa.optimizers.AdamW(
    learning_rate=Config.lr,
    weight_decay=Config.weight_decay,
    clipnorm=1.0,  # clip each gradient to a maximum norm of 1.0
)
osuossu8 commented 1 year ago

Dropout set to 0

pytorch

# dropout overrides passed to from_pretrained are forwarded to the model config
self.bert = transformers.BertModel.from_pretrained(BERT_MODEL,
                                                   hidden_dropout_prob=0,
                                                   attention_probs_dropout_prob=0)

tensorflow

from transformers import TFAutoModel, AutoConfig

cfg = AutoConfig.from_pretrained('roberta-base')
cfg.hidden_dropout_prob = 0
cfg.attention_probs_dropout_prob = 0
roberta = TFAutoModel.from_pretrained('roberta-base', config=cfg)
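
The same config-based override also works on the PyTorch side; a short sketch (roberta-base is just an example checkpoint):

from transformers import AutoConfig, AutoModel

cfg = AutoConfig.from_pretrained('roberta-base')
cfg.hidden_dropout_prob = 0.0
cfg.attention_probs_dropout_prob = 0.0
model = AutoModel.from_pretrained('roberta-base', config=cfg)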
osuossu8 commented 1 year ago

Freeze weights of layers

pytorch

def freeze(module):
    """
    Freezes module's parameters.
    """
    for parameter in module.parameters():
        parameter.requires_grad = False

self.model = AutoModel.from_pretrained(model_name, config=self.config)

# Freeze the embeddings and the first two encoder layers
if self.cfg.freezing:
    freeze(self.model.embeddings)
    freeze(self.model.encoder.layer[:2])

tensorflow https://note.nkmk.me/python-tensorflow-keras-trainable-freeze-unfreeze/

transformer = TFAutoModel.from_pretrained(Config.model, config=cfg)

# Freeze the embeddings and the first two encoder layers (set before compiling the model)
transformer.roberta.embeddings.trainable = False
for l in transformer.roberta.encoder.layer[:2]:
    l.trainable = False
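
A quick way to sanity-check the freezing on the PyTorch side is to count trainable parameters before and after; count_trainable_parameters below is an illustrative helper, not part of the solution:

import torch.nn as nn
from transformers import AutoModel

def count_trainable_parameters(model: nn.Module) -> int:
    # only parameters with requires_grad=True receive gradients and optimizer updates
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

backbone = AutoModel.from_pretrained('roberta-base')
print(count_trainable_parameters(backbone))  # all parameters trainable

# freeze the embeddings and the first two encoder layers, as above
for module in [backbone.embeddings, *backbone.encoder.layer[:2]]:
    for p in module.parameters():
        p.requires_grad = False

print(count_trainable_parameters(backbone))  # should be noticeably smaller

When freezing this way, it is also common to pass only the trainable parameters to the optimizer, e.g. filter(lambda p: p.requires_grad, model.parameters()).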