Hi, while modifying the code of tutorial 2.1, I tried replacing the PrefixTuningTemplate with ManualTemplate. I found that, for a single sample, the wrapped_example printed out is the same as before. However, during training I get the error "element 0 of tensors does not require grad and does not have a grad_fn". I suspect this is because some parameters are frozen and cannot be trained?
I also tried changing the params in the optimizer to the model's model.parameters(), but the problem still occurs. Does ManualTemplate mark certain parameters as non-trainable? Or what should I do to make it work?
The code I used is as follows:
import torch
from openprompt.data_utils.conditional_generation_dataset import WebNLGProcessor
dataset = {}
dataset['train'] = WebNLGProcessor().get_train_examples("./datasets/CondGen/webnlg_2017/")[:1000]
dataset['test'] = WebNLGProcessor().get_test_examples("./datasets/CondGen/webnlg_2017/")[:100]
# load a pretrained model, its tokenizer, its config, and its TokenizerWrapper with one function
from openprompt.plms import load_plm
plm, tokenizer, model_config, WrapperClass = load_plm("t5", "t5-base")
# Instantiating the template. (The tutorial's PrefixTuningTemplate is commented out below; I use ManualTemplate instead.)
from openprompt.prompts.prefix_tuning_template import PrefixTuningTemplate
from openprompt.prompts import ManualTemplate
# mytemplate = PrefixTuningTemplate(model=plm, tokenizer=tokenizer, text=' {"placeholder":"text_a"} {"special": "<eos>"} {"mask"} ', using_decoder_past_key_values=True)
mytemplate = ManualTemplate(tokenizer=tokenizer, text=' {"placeholder":"text_a"} {"special": "<eos>"} {"mask"} ')
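# Note: unlike PrefixTuningTemplate, ManualTemplate only defines fixed prompt text
# and (as far as I can tell) registers no learnable parameters of its own.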
# To better understand how the template wraps the example, we visualize one instance.
# You may observe that the example doesn't end with an <|endoftext|> token. Don't worry: the specific
# end-of-text token is language-model-specific, and the TokenizerWrapper will add it for you once you pass `predict_eos_token=True`.
wrapped_example = mytemplate.wrap_one_example(dataset['train'][0])
print(wrapped_example)
# You can loop over the dataset yourself by successively calling mytemplate.wrap_one_example and WrapperClass().tokenizer(),
# but we provide a PromptDataLoader for you.
from openprompt import PromptDataLoader
train_dataloader = PromptDataLoader(dataset=dataset["train"], template=mytemplate, tokenizer=tokenizer,
    tokenizer_wrapper_class=WrapperClass, max_seq_length=256, decoder_max_length=256,
    batch_size=5, shuffle=True, teacher_forcing=True, predict_eos_token=True,  # be sure to pass predict_eos_token=True if your template doesn't contain one, or your model may fail to stop generating.
    truncate_method="head")
test_dataloader = PromptDataLoader(dataset=dataset["test"], template=mytemplate, tokenizer=tokenizer,
    tokenizer_wrapper_class=WrapperClass, max_seq_length=256, decoder_max_length=256,
    batch_size=5, shuffle=False, teacher_forcing=False, predict_eos_token=True,
    truncate_method="head")
# load the pipeline model PromptForGeneration.
from openprompt import PromptForGeneration
prompt_model = PromptForGeneration(plm=plm,template=mytemplate, freeze_plm=True,tokenizer=tokenizer, plm_eval_mode="store_true")
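# With freeze_plm=True, every PLM parameter has requires_grad=False, so if the template
# contributes no trainable parameters either, nothing in the forward pass requires grad.
# (Side note: plm_eval_mode expects a bool; the string "store_true" is truthy, so it behaves like True.)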
use_cuda = True
if use_cuda:
    prompt_model = prompt_model.cuda()
from transformers import AdamW
# Following PrefixTuning (https://github.com/XiangLi1999/PrefixTuning), we also freeze the language model
# and only include the template's parameters in training.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
    {
        "params": [p for n, p in mytemplate.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],
        # "params": prompt_model.parameters(),
        "weight_decay": 0.0,
    },
    {
        "params": [p for n, p in mytemplate.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad],
        "weight_decay": 0.0,
    },
]
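# For ManualTemplate, mytemplate.named_parameters() appears to yield nothing, so both groups above end up empty.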
optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8)
from transformers.optimization import get_linear_schedule_with_warmup
tot_step = len(train_dataloader)*5
scheduler = get_linear_schedule_with_warmup(optimizer, 0, tot_step)
# We provide a generation metric; you can also define your own. Note that it's not directly comparable to WebNLG's official evaluation scripts.
from openprompt.utils.metrics import generation_metric
# Define evaluate function
def evaluate(prompt_model, dataloader):
    generated_sentence = []
    groundtruth_sentence = []
    prompt_model.eval()
    for step, inputs in enumerate(dataloader):
        if use_cuda:
            inputs = inputs.cuda()
        _, output_sentence = prompt_model.generate(inputs, **generation_arguments)
        generated_sentence.extend(output_sentence)
        groundtruth_sentence.extend(inputs['tgt_text'])
    score = generation_metric(generated_sentence, groundtruth_sentence, "sentence_bleu")
    print("test_score", score, flush=True)
    return generated_sentence
generation_arguments = {
    "max_length": 512,
    "max_new_tokens": None,
    "min_length": 5,
    "temperature": 1.0,
    "do_sample": False,
    "top_k": 0,
    "top_p": 0.9,
    "repetition_penalty": 1.0,
    "num_beams": 5,
    "bad_words_ids": [[628], [198]]
}
# training and generation.
global_step = 0
tot_loss = 0
log_loss = 0
for epoch in range(3):
    prompt_model.train()
    for step, inputs in enumerate(train_dataloader):
        global_step += 1
        if use_cuda:
            inputs = inputs.cuda()
        loss = prompt_model(inputs)
        loss.backward()
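        # ^ This backward() call is where "element 0 of tensors does not require grad
        #   and does not have a grad_fn" is raised: if no parameter requires grad,
        #   the loss tensor has no grad_fn to backpropagate through.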
        tot_loss += loss.item()
        torch.nn.utils.clip_grad_norm_(mytemplate.parameters(), 1.0)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
        if global_step % 50 == 0:
            print("Epoch {}, global_step {} average loss: {} lr: {}".format(epoch, global_step, (tot_loss - log_loss) / 50, scheduler.get_last_lr()[0]), flush=True)
            log_loss = tot_loss
generated_sentence = evaluate(prompt_model, test_dataloader)
with open(f"./Generated_sentence_webnlg_gpt2.txt", 'w') as f:
    for i in generated_sentence:
        f.write(i + "\n")
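As a quick sanity check of my hypothesis (a sketch; mytemplate and prompt_model are the objects constructed above), counting the tensors that still require grad should show whether anything is trainable at all:

template_params = [p for p in mytemplate.parameters() if p.requires_grad]
model_params = [p for p in prompt_model.parameters() if p.requires_grad]
print(len(template_params), len(model_params))
# If both counts are 0, the loss produced by prompt_model(inputs) has no grad_fn,
# and loss.backward() raises exactly the error described above.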