Overriding the `Trainer` can be done by subclassing it and redefining `compute_loss`, as follows (this is also explained in the docs):
import torch
from torch import nn
from transformers import Trainer

class CustomTrainer(Trainer):
    def compute_loss(self, model, inputs, return_outputs=False):
        labels = inputs.get("labels")
        # forward pass (labels stay in `inputs`, so the model also computes
        # its own default loss, which is discarded in favor of the one below)
        outputs = model(**inputs)
        logits = outputs.get("logits")
        # compute custom loss: class-weighted cross-entropy for 2 labels;
        # the weight tensor must be on the same device as the logits
        loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([0.2, 0.3], device=logits.device))
        loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1))
        return (loss, outputs) if return_outputs else loss
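
To show how the subclass plugs in, here is a minimal usage sketch. The checkpoint name, toy dataset, and `TrainingArguments` values are illustrative assumptions, not part of the original answer:

from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    TrainingArguments,
)

# placeholder checkpoint and toy data -- swap in your own
checkpoint = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

texts = ["great movie", "terrible movie"]
encodings = tokenizer(texts, truncation=True, padding=True)

class ToyDataset(torch.utils.data.Dataset):
    """Wraps tokenizer output so each item includes a 'labels' key."""
    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        item = {k: torch.tensor(v[idx]) for k, v in self.encodings.items()}
        item["labels"] = torch.tensor(self.labels[idx])
        return item

train_dataset = ToyDataset(encodings, labels=[1, 0])

training_args = TrainingArguments(output_dir="out", num_train_epochs=1)
trainer = CustomTrainer(model=model, args=training_args, train_dataset=train_dataset)
trainer.train()

Nothing else changes compared to a regular `Trainer`: the weighted loss is applied automatically because `Trainer.training_step` calls `compute_loss` internally.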