How about this? A full end-to-end fine-tune on GLUE MRPC with the Trainer API:
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
import numpy as np
import evaluate
# 1) Data & tokenizer
raw_datasets = load_dataset("glue", "mrpc")
checkpoint = "bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# 2) Tokenize both sentences of each pair; no padding here, since the
#    Trainer's default collator pads each batch dynamically
def tokenize_function(example):
    return tokenizer(example["sentence1"], example["sentence2"], truncation=True)

tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
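# (Hedged sanity check) After map(), each split should carry the original MRPC
# columns plus the new token columns; expected something like
# ['sentence1', 'sentence2', 'label', 'idx',
#  'input_ids', 'token_type_ids', 'attention_mask']
print(tokenized_datasets["train"].column_names)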
# 3) Metric; loaded once, outside compute_metrics, for efficiency
metric = evaluate.load("glue", "mrpc")

def compute_metrics(eval_preds):
    preds = np.argmax(eval_preds.predictions, axis=-1)  # logits -> class ids
    labels = eval_preds.label_ids
    return metric.compute(predictions=preds, references=labels)
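# (Hedged example) A toy call with dummy values, only to show the return
# format; the GLUE MRPC metric reports both accuracy and F1,
# e.g. {'accuracy': ..., 'f1': ...}
print(metric.compute(predictions=[1, 0, 1], references=[1, 1, 1]))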
# 4) Training arguments & model (num_labels=2: MRPC is binary paraphrase detection)
training_args = TrainingArguments(output_dir="test_trainer")
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
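# (Optional, version-dependent) With only output_dir set, the Trainer never
# calls compute_metrics during training; you only get metrics from an explicit
# trainer.evaluate(). To evaluate at the end of every epoch, something like
# the line below should work; recent transformers releases call the flag
# eval_strategy, older ones evaluation_strategy, so check your version:
# training_args = TrainingArguments(output_dir="test_trainer", eval_strategy="epoch")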
# 5) Trainer; passing the tokenizer enables the default DataCollatorWithPadding
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)
# 6) Fine-tune
trainer.train()
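
# (Sketch) Once training finishes, get final validation numbers either way,
# using only the objects defined above.
# Option A: let the Trainer run its eval loop and apply compute_metrics
print(trainer.evaluate())

# Option B: predict on the validation set and score manually with the same metric
predictions = trainer.predict(tokenized_datasets["validation"])
preds = np.argmax(predictions.predictions, axis=-1)
print(metric.compute(predictions=preds, references=predictions.label_ids))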