huggingface / transformers

🤗 Transformers: State-of-the-art Machine Learning for Pytorch, TensorFlow, and JAX.
https://huggingface.co/transformers
Apache License 2.0

"Sequence Classification with IMDb Reviews " error, when using "bert-base-multilingual-cased" model. #7421

Closed · baiziyuandyufei closed this issue 3 years ago

baiziyuandyufei commented 3 years ago

## Information

Model I am using (Bert, XLNet ...): bert-base-multilingual-cased

The problem arises when using: my own modified version of the "Sequence Classification with IMDb Reviews" example from the custom datasets tutorial.

The task I am working on is: binary sequence classification (IMDb-style sentiment data).

## To reproduce

Steps to reproduce the behavior:

  1. Start from the example code at https://huggingface.co/transformers/custom_datasets.html#seq-imdb
  2. Modify it to use the `bert-base-multilingual-cased` model:

```python
# coding:utf-8
"""
"""

from pathlib import Path
from sklearn.model_selection import train_test_split
from transformers import DistilBertTokenizerFast
import torch
from transformers import Trainer, TrainingArguments
from nlp import load_dataset
from transformers import AutoTokenizer, AutoModelWithLMHead

tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
model = AutoModelWithLMHead.from_pretrained("bert-base-multilingual-cased")

def read_imdb_split(split_dir):
    split_dir = Path(split_dir)
    texts = []
    labels = []
    for label_dir in ["pos", "neg"]:
        for text_file in (split_dir/label_dir).iterdir():
            texts.append(text_file.read_text())
            labels.append(0 if label_dir == "neg" else 1)

    return texts, labels

train_texts, train_labels = read_imdb_split('dataset/aclImdb/train')
test_texts, test_labels = read_imdb_split('dataset/aclImdb/test')
train_texts, val_texts, train_labels, val_labels = train_test_split(train_texts, train_labels, test_size=.2)

train_encodings = tokenizer(train_texts, truncation=True, padding=True, max_length=100)
val_encodings = tokenizer(val_texts, truncation=True, padding=True, max_length=100)
test_encodings = tokenizer(test_texts, truncation=True, padding=True, max_length=100)

class IMDbDataset(torch.utils.data.Dataset):
    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
        item['labels'] = torch.tensor(self.labels[idx])
        return item

    def __len__(self):
        return len(self.labels)

train_dataset = IMDbDataset(train_encodings, train_labels)
val_dataset = IMDbDataset(val_encodings, val_labels)
test_dataset = IMDbDataset(test_encodings, test_labels)

training_args = TrainingArguments(
    output_dir='./results',
    num_train_epochs=1,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=64,
    warmup_steps=500,
    weight_decay=0.01,
    evaluate_during_training=True,
    logging_dir='./logs',
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=test_dataset
)

trainer.train()
```

  3. The training run fails with the following error:

```
ValueError: Expected input batch_size (1600) to match target batch_size (16).
```



<!-- If you have code snippets, error messages, stack traces please provide them here as well.
     Important! Use code tags to correctly format your code. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting
     Do not use screenshots, as they are hard to read and (more importantly) don't allow others to copy-and-paste your code.-->

## Expected behavior

<!-- A clear and concise description of what you would expect to happen. -->
sgugger commented 3 years ago

It looks like you are using a model for language modeling (AutoModelWithLMHead) instead of a model for sequence classification (AutoModelForSequenceClassification) which is why you have that shape error.
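
For context, here is a minimal sketch (not part of the original thread) illustrating the shape difference: the LM head emits one vocabulary-sized prediction per token, while the classification head emits one prediction per sequence, so with a batch of 16 sequences padded to `max_length=100` the LM loss sees 16 × 100 = 1600 logit rows against only 16 labels. The model name mirrors the issue; `num_labels=2` is an assumption for binary sentiment.

```python
# Illustrative sketch (assumption): compare the output shapes of the two heads.
import torch
from transformers import AutoModelForSequenceClassification, AutoModelWithLMHead, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
batch = tokenizer(["a good movie", "a bad movie"], padding=True, return_tensors="pt")

lm_model = AutoModelWithLMHead.from_pretrained("bert-base-multilingual-cased")
cls_model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-multilingual-cased", num_labels=2  # num_labels=2 is an assumption (pos/neg)
)

with torch.no_grad():
    lm_logits = lm_model(**batch)[0]    # (batch, seq_len, vocab_size): one prediction per token
    cls_logits = cls_model(**batch)[0]  # (batch, num_labels): one prediction per sequence

print(lm_logits.shape)   # (2, seq_len, vocab_size)
print(cls_logits.shape)  # (2, 2)
```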

baiziyuandyufei commented 3 years ago

@sgugger thank you! I modified my code, and now everything works well.

```python
# coding:utf-8
"""
"""

from pathlib import Path
from sklearn.model_selection import train_test_split
import torch
from transformers import Trainer, TrainingArguments
from nlp import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("model/bert-base-multilingual-cased")  
model = AutoModelForSequenceClassification.from_pretrained("model/bert-base-multilingual-cased")

def read_imdb_split(split_dir):
    split_dir = Path(split_dir)
    texts = []
    labels = []
    for label_dir in ["pos", "neg"]:
        for text_file in (split_dir/label_dir).iterdir():
            texts.append(text_file.read_text())
            labels.append(0 if label_dir == "neg" else 1)

    return texts, labels

train_texts, train_labels = read_imdb_split('dataset/ChnSentiCorp')
test_texts, test_labels = read_imdb_split('dataset/ChnSentiCorp')
train_texts, val_texts, train_labels, val_labels = train_test_split(train_texts, train_labels, test_size=.2)

train_encodings = tokenizer(train_texts, truncation=True, padding=True, max_length=100, verbose=False)
val_encodings = tokenizer(val_texts, truncation=True, padding=True, max_length=100, verbose=False)
test_encodings = tokenizer(test_texts, truncation=True, padding=True, max_length=100, verbose=False)

class IMDbDataset(torch.utils.data.Dataset):
    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
        item['labels'] = torch.tensor(self.labels[idx])
        return item

    def __len__(self):
        return len(self.labels)

train_dataset = IMDbDataset(train_encodings, train_labels)
val_dataset = IMDbDataset(val_encodings, val_labels)
test_dataset = IMDbDataset(test_encodings, test_labels)

training_args = TrainingArguments(
    output_dir='./results',
    num_train_epochs=16,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    warmup_steps=500,
    weight_decay=0.01,
    evaluate_during_training=True,
    logging_dir='./logs',
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=test_dataset
)

trainer.train()
```
```
Epoch:   0%|                                                                              | 0/16 [00:00<?, ?it/s]
Iteration:   2%|█▏                                                           | 65/3200 [03:04<2:26:14,  2.80s/it]
```
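
As a possible follow-up (not from the original thread), one way to check the fine-tuned model on the held-out split is to run evaluation through a `Trainer` with a `compute_metrics` callback, which reports accuracy in addition to the loss. The callback and the metric are assumptions added here for illustration; `model`, `training_args`, and `test_dataset` refer to the objects defined in the code above.

```python
# Hypothetical follow-up (assumption): evaluate the fine-tuned model and report accuracy.
import numpy as np

def compute_metrics(eval_pred):
    logits, labels = eval_pred            # EvalPrediction unpacks to (predictions, label_ids)
    preds = np.argmax(logits, axis=-1)    # predicted class per example
    return {"accuracy": (preds == labels).mean()}

eval_trainer = Trainer(
    model=model,                          # the model fine-tuned above
    args=training_args,
    eval_dataset=test_dataset,
    compute_metrics=compute_metrics,
)
print(eval_trainer.evaluate())            # e.g. {'eval_loss': ..., 'eval_accuracy': ...}
```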