Open dlttmd opened 12 months ago
Did you happen to solve this, by any chance???
from transformers import BertModel, DistilBertModel

# Load the pretrained DistilKoBERT encoder
distilbert_model = DistilBertModel.from_pretrained('monologg/distilkobert')
Since loading the tokenizer raised an error, I used the original KoBERT tokenizer instead.
from kobert_tokenizer import KoBERTTokenizer
tokenizer = KoBERTTokenizer.from_pretrained('skt/kobert-base-v1')
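For reference, here is a minimal sketch of how the token_ids / valid_length pair expected by the classifier below could be produced with this tokenizer; the sample sentence and max_length are placeholders, not values from the original code.

import torch

# Hypothetical encoding step: pad/truncate to a fixed length and count the real tokens.
encoded = tokenizer('안녕하세요, 한국어 문장입니다.',
                    padding='max_length', truncation=True, max_length=64)
token_ids = torch.tensor([encoded['input_ids']])               # shape (1, 64)
valid_length = torch.tensor([sum(encoded['attention_mask'])])  # number of non-pad tokens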
In def forward, remove the segment_ids that the original KoBERT model took.
import torch
from torch import nn

class DistilBERTClassifier(nn.Module):
    def __init__(self,
                 distilbert,
                 hidden_size=768,
                 num_classes=6,   ## adjust to your number of labels ##
                 dr_rate=None,
                 params=None):
        super(DistilBERTClassifier, self).__init__()
        self.distilbert = distilbert
        self.dr_rate = dr_rate
        self.classifier = nn.Linear(hidden_size, num_classes)
        if dr_rate:
            self.dropout = nn.Dropout(p=dr_rate)

    def gen_attention_mask(self, token_ids, valid_length):
        # Mark the first valid_length positions of each sequence as real tokens
        attention_mask = torch.zeros_like(token_ids)
        for i, v in enumerate(valid_length):
            attention_mask[i][:v] = 1
        return attention_mask.float()

    def forward(self, token_ids, valid_length):
        attention_mask = self.gen_attention_mask(token_ids, valid_length)
        # Changed part: mean-pool the last hidden state directly so forward returns a single value
        pooler = self.distilbert(input_ids=token_ids,
                                 attention_mask=attention_mask.to(token_ids.device)).last_hidden_state.mean(dim=1)
        out = self.dropout(pooler) if self.dr_rate else pooler  # avoid an undefined `out` when dr_rate is None
        return self.classifier(out)
model = DistilBERTClassifier(distilbert_model, dr_rate=0.5).to(device)
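As a quick, hypothetical smoke test of the new signature (random ids and lengths, just to check the output shape):

# Hypothetical sanity check: batch of 2 sequences of length 64 with random token ids.
dummy_ids = torch.randint(0, 8000, (2, 64)).to(device)
dummy_len = torch.tensor([10, 20])
with torch.no_grad():
    logits = model(dummy_ids, dummy_len)
print(logits.shape)  # expected: torch.Size([2, 6])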
In the training loop, remove the segment_ids that used to be passed to the model and call it as out = model(token_ids, valid_length).
for e in range(start_ephochs, num_epochs):
    train_acc = 0.0
    test_acc = 0.0
    model.train()
    for batch_id, (token_ids, valid_length, segment_ids, label) in tqdm(enumerate(train_dataloader), total=len(train_dataloader)):
        optimizer.zero_grad()
        token_ids = token_ids.long().to(device)
        segment_ids = segment_ids.long().to(device)  # still produced by the dataloader, but no longer passed to the model
        valid_length = valid_length
        label = label.long().to(device)
        # print(f"token_ids: {token_ids}\n valid_length: {valid_length}\n segment_ids: {segment_ids}\n label: {label}")
        out = model(token_ids, valid_length)
        loss = loss_fn(out, label)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        optimizer.step()
        scheduler.step()  # update learning rate schedule
        train_acc += calc_accuracy(out, label)
        if batch_id % log_interval == 0:
            print("epoch {} batch id {} loss {} train acc {}".format(e+1, batch_id+1, loss.data.cpu().numpy(), train_acc / (batch_id+1)))
    torch.save(model.state_dict(), f'/content/gdrive/MyDrive/ColabNotebooks/checkpoint/checkpoint_epoch_{e+1}.ckpt')
    # torch.save(model.state_dict(), f'/content/gdrive/MyDrive/ColabNotebooks/checkpoint/checkpoint_epoch_5.1.ckpt')
    print("epoch {} train acc {}".format(e+1, train_acc / (batch_id+1)))

    model.eval()  # disable dropout for evaluation
    for batch_id, (token_ids, valid_length, segment_ids, label) in tqdm(enumerate(test_dataloader), total=len(test_dataloader)):
        token_ids = token_ids.long().to(device)
        segment_ids = segment_ids.long().to(device)
        valid_length = valid_length
        label = label.long().to(device)
        out = model(token_ids, valid_length)
        test_acc += calc_accuracy(out, label)
    print("epoch {} test acc {}".format(e+1, test_acc / (batch_id+1)))
Downgrading transformers solved the problem: pip install transformers==4.20.1
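If it helps, you can confirm which version actually got installed:

import transformers
print(transformers.__version__)  # should print 4.20.1 after the downgrade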
Hello! I'm a student learning Python. While trying to use the KoBERT model, I ran into an error when loading the tokenizer, so I'm leaving a question!
tokenizer = KoBertTokenizer.from_pretrained('monologg/kobert')
When I run the code, I get:
The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. It may result in unexpected tokenization. The tokenizer class you load from this checkpoint is 'BertTokenizer'. The class this function is called from is 'KoBertTokenizer'.
AttributeError Traceback (most recent call last)