I am trying to build a multi-label, multi-class text classification model: any input text can have zero or more labels, drawn from 11 possible classes.
I have been using problem_type="multi_label_classification", and everything looks fine until the loss is computed with binary_cross_entropy_with_logits, at which point I get:

ValueError: Target size (torch.Size([16, 11])) must be the same as input size (torch.Size([16, 2]))

I presume my data is in the wrong shape somehow, but I can't see where exactly. Any suggestions?
transformers==4.8.2
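For what it's worth, the shape check itself is easy to reproduce in isolation. Here is a small sketch (shapes hard-coded to match the traceback at the bottom of this post) that fails the same way:

import torch
import torch.nn.functional as F

# Shapes taken from the traceback: the model emits [16, 2] logits,
# while my multi-hot targets are [16, 11].
logits = torch.randn(16, 2)
targets = torch.randint(0, 2, (16, 11)).float()
F.binary_cross_entropy_with_logits(logits, targets)
# ValueError: Target size (torch.Size([16, 11])) must be the same as input size (torch.Size([16, 2]))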
Here is a minimal example:
import torch
from torch.utils.data.dataset import Dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments
# Example data. In reality the strings are usually longer and there are
# 11 possible classes (the toy labels below only use 5).
texts = [
    "This is the first sentence.",
    "This is the second sentence.",
    "This is another sentence.",
    "Finally, the last sentence.",
]
labels = [
    [0, 0, 0, 0, 1],
    [1, 0, 0, 0, 0],
    [0, 1, 1, 0, 0],
    [0, 0, 0, 0, 0],
]
train_texts = texts[:2]
train_labels = labels[:2]
eval_texts = texts[2:]
eval_labels = labels[2:]
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
train_encodings = tokenizer(train_texts, padding="max_length", truncation=True, max_length=512)
eval_encodings = tokenizer(eval_texts, padding="max_length", truncation=True, max_length=512)
class TextClassifierDataset(Dataset):
    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
        # Multi-hot vector per example; BCEWithLogitsLoss expects float targets.
        item["labels"] = torch.tensor(self.labels[idx], dtype=torch.float)
        return item
train_dataset = TextClassifierDataset(train_encodings, train_labels)
eval_dataset = TextClassifierDataset(eval_encodings, eval_labels)
model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-uncased",
    problem_type="multi_label_classification",
)
training_arguments = TrainingArguments(
    output_dir=".",
    evaluation_strategy="epoch",
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=1,
)
trainer = Trainer(
    model=model,
    args=training_arguments,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
)
trainer.train()
# long traceback, but here is the important bit...
~/python3.8/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
711
712 def forward(self, input: Tensor, target: Tensor) -> Tensor:
--> 713 return F.binary_cross_entropy_with_logits(input, target,
714 self.weight,
715 pos_weight=self.pos_weight,
~/python3.8/site-packages/torch/nn/functional.py in binary_cross_entropy_with_logits(input, target, weight, size_average, reduce, reduction, pos_weight)
2956
2957 if not (target.size() == input.size()):
-> 2958 raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
2959
2960 return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum)
ValueError: Target size (torch.Size([16, 11])) must be the same as input size (torch.Size([16, 2]))
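In case the label construction matters: the real multi-hot vectors are built roughly like this (class names invented for this post; the real taxonomy has 11 classes):

# Sketch of the label encoding, with made-up class names.
ALL_CLASSES = [f"class_{i}" for i in range(11)]

def to_multi_hot(tags):
    # A text with zero tags becomes an all-zeros vector,
    # which I understood to be valid for multi-label BCE.
    return [1 if c in tags else 0 for c in ALL_CLASSES]

to_multi_hot(["class_3", "class_7"])
# -> [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0]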