KeyError: 'eval_loss' when using Trainer with ViTModel and ViTForImageClassification

When I try to run ViTForImageClassification with a Trainer object, it reaches the end of the evaluation step before throwing KeyError: 'eval_loss' (full traceback below).

This is with Transformers 4.9.0.

import torch.nn as nn
from transformers import ViTModel
from transformers.modeling_outputs import SequenceClassifierOutput

class ViTForImageClassification(nn.Module):
    # architecture: pretrained ViT backbone + dropout + linear classification head
    def __init__(self, num_labels=len(string_labels)):  # string_labels is defined earlier in my notebook
        super(ViTForImageClassification, self).__init__()
        self.vit = ViTModel.from_pretrained('google/vit-base-patch16-224-in21k')
        self.dropout = nn.Dropout(0.1)
        self.classifier = nn.Linear(self.vit.config.hidden_size, num_labels)
        self.num_labels = num_labels

    # forward pass through that architecture + loss computation when labels are provided
    def forward(self, pixel_values, labels=None):
        outputs = self.vit(pixel_values=pixel_values)
        output = self.dropout(outputs.last_hidden_state[:, 0])  # [CLS] token representation
        logits = self.classifier(output)
        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
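
To double-check the loss computation itself, here is a minimal sketch with dummy tensors (num_labels=2 and the shapes are placeholders, not my real data) that I would expect to print a scalar loss:

import torch

model = ViTForImageClassification(num_labels=2)
dummy_pixel_values = torch.randn(2, 3, 224, 224)  # (batch, channels, height, width) expected by the ViT backbone
dummy_labels = torch.tensor([0, 1])
out = model(pixel_values=dummy_pixel_values, labels=dummy_labels)
print(out.loss)  # should be a scalar tensor, not None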

from transformers import TrainingArguments, Trainer

args = TrainingArguments(
    output_dir="output",
    evaluation_strategy="steps",
    eval_steps=100,
    per_device_train_batch_size=6,
    per_device_eval_batch_size=6,
    num_train_epochs=3,
    seed=0,
    load_best_model_at_end=True,
    # metric_for_best_model=metric_name,
    logging_dir='logs',
)
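
As far as I understand, when load_best_model_at_end=True and metric_for_best_model is left unset, the Trainer falls back to "loss", which would explain why the checkpoint-saving step in the traceback below looks up metrics["eval_loss"]. A quick way to confirm that fallback (sketch):

print(args.metric_for_best_model)  # expected to print "loss" when left unset with load_best_model_at_end=True
print(args.greater_is_better)      # expected: False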

import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

def compute_metrics(p):
    # p is an EvalPrediction: (predictions, label_ids)
    pred, labels = p
    pred = np.argmax(pred, axis=1)
    accuracy = accuracy_score(y_true=labels, y_pred=pred)
    recall = recall_score(y_true=labels, y_pred=pred)
    precision = precision_score(y_true=labels, y_pred=pred)
    f1 = f1_score(y_true=labels, y_pred=pred)
    return {"accuracy": accuracy, "precision": precision, "recall": recall, "f1": f1}
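
For reference, a quick standalone check of compute_metrics with fake logits and labels (two classes, so the binary defaults of precision/recall/f1 apply):

fake_logits = np.array([[2.0, 0.1], [0.2, 1.5], [1.0, 0.9], [0.3, 2.2]])
fake_labels = np.array([0, 1, 1, 1])
print(compute_metrics((fake_logits, fake_labels)))
# e.g. {'accuracy': 0.75, 'precision': 1.0, 'recall': 0.666..., 'f1': 0.8}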

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=preprocessed_train_ds,
    eval_dataset=preprocessed_val_ds,
    compute_metrics=compute_metrics,
)
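
If it helps with debugging, calling evaluate() directly should show whether an eval_loss key is being produced at all before the checkpoint step tries to read it (sketch):

metrics = trainer.evaluate()
print(sorted(metrics.keys()))  # I would expect 'eval_loss' here alongside eval_accuracy, eval_f1, etc.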

Full Traceback:

---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_1060/49973641.py in <module>
----> 1 trainer.train()

~\Anaconda3\envs\transformers\lib\site-packages\transformers\trainer.py in train(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)
   1334                     self.control = self.callback_handler.on_step_end(args, self.state, self.control)
   1335 
-> 1336                     self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
   1337 
   1338                 if self.control.should_epoch_stop or self.control.should_training_stop:

~\Anaconda3\envs\transformers\lib\site-packages\transformers\trainer.py in _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval)
   1439 
   1440         if self.control.should_save:
-> 1441             self._save_checkpoint(model, trial, metrics=metrics)
   1442             self.control = self.callback_handler.on_save(self.args, self.state, self.control)
   1443 

~\Anaconda3\envs\transformers\lib\site-packages\transformers\trainer.py in _save_checkpoint(self, model, trial, metrics)
   1541             if not metric_to_check.startswith("eval_"):
   1542                 metric_to_check = f"eval_{metric_to_check}"
-> 1543             metric_value = metrics[metric_to_check]
   1544 
   1545             operator = np.greater if self.args.greater_is_better else np.less

KeyError: 'eval_loss'