What should outputs contain when using a custom compute_loss?

I’m writing a custom Trainer, and unless I put None as the first element of outputs in compute_loss, compute_metrics doesn’t receive all of my predictions. For example:

in compute_loss:

outputs = (None,
           torch.argmax(type_logit, dim=1),
           torch.argmax(polarity_logit, dim=1),
           torch.argmax(tense_logit, dim=1),
           torch.argmax(certainty_logit, dim=1))

in compute_metrics:

pred.predictions → a tuple of 4 arrays (one per head)

in compute_loss:

outputs = (torch.argmax(type_logit, dim=1),
           torch.argmax(polarity_logit, dim=1),
           torch.argmax(tense_logit, dim=1),
           torch.argmax(certainty_logit, dim=1))

in compute_metrics:

pred.predictions → a tuple of only 3 arrays
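
I suspect the difference comes from how Trainer.prediction_step consumes what compute_loss returns. If I’m reading the transformers source correctly, it does roughly this (simplified sketch, not my code):

# simplified: inside transformers.Trainer.prediction_step, when compute_loss
# is called with return_outputs=True
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
if isinstance(outputs, dict):
    logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
    logits = outputs[1:]  # first element is assumed to be the loss and is dropped

If that is right, the leading None only exists to be sliced off, which would explain why all four argmax tensors reach pred.predictions when it is there and only three when it is not.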

import torch
import torch.nn as nn
from sklearn.metrics import f1_score
from transformers import EarlyStoppingCallback, Trainer


class CustomTrainer(Trainer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def compute_loss(self, model, inputs, return_outputs=False):
        # one cross-entropy loss per classification head
        # (`device` is defined earlier in my script)
        criterion = {
            'type': nn.CrossEntropyLoss().to(device),
            'polarity': nn.CrossEntropyLoss().to(device),
            'tense': nn.CrossEntropyLoss().to(device),
            'certainty': nn.CrossEntropyLoss().to(device),
        }
        # forward pass: the model returns one logit tensor per head
        labels = inputs.pop("labels").to(torch.int64)
        type_logit, polarity_logit, tense_logit, certainty_logit = model(**inputs)

        # sum the four per-head losses; labels has one column per head
        loss = criterion['type'](type_logit, labels[:, 0]) + \
               criterion['polarity'](polarity_logit, labels[:, 1]) + \
               criterion['tense'](tense_logit, labels[:, 2]) + \
               criterion['certainty'](certainty_logit, labels[:, 3])

        # the leading None is the part I am unsure about
        outputs = (None,
                   torch.argmax(type_logit, dim=1),
                   torch.argmax(polarity_logit, dim=1),
                   torch.argmax(tense_logit, dim=1),
                   torch.argmax(certainty_logit, dim=1))
        return (loss, outputs) if return_outputs else loss
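
For context, the model I pass to the trainer returns one logit tensor per head. It is roughly along these lines (illustrative sketch only, not my exact model; the head names and sizes are made up):

class MultiHeadClassifier(nn.Module):
    # illustrative: a shared encoder with four classification heads
    def __init__(self, encoder, hidden_size, n_type, n_polarity, n_tense, n_certainty):
        super().__init__()
        self.encoder = encoder
        self.type_head = nn.Linear(hidden_size, n_type)
        self.polarity_head = nn.Linear(hidden_size, n_polarity)
        self.tense_head = nn.Linear(hidden_size, n_tense)
        self.certainty_head = nn.Linear(hidden_size, n_certainty)

    def forward(self, **inputs):
        # pooled [CLS] representation from the encoder
        hidden = self.encoder(**inputs).last_hidden_state[:, 0]
        return (self.type_head(hidden),
                self.polarity_head(hidden),
                self.tense_head(hidden),
                self.certainty_head(hidden))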
    
def compute_metrics(pred):
    # the returned metric names are Korean for type / polarity / tense / certainty
    labels = pred.label_ids
    preds = pred.predictions
    ans = []
    for i in range(4):
        # i-th head: predictions from the i-th array, labels from the i-th column
        f1 = f1_score(labels[:, i], preds[i], average='weighted')
        ans.append(f1)
    return {
        'f1 유형': ans[0],
        'f1 극성': ans[1],
        'f1 시제': ans[2],
        'f1 확실': ans[3],
    }

trainer = CustomTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,      # training data
    eval_dataset=val_dataset,         # validation data
    compute_metrics=compute_metrics,  # how the model is evaluated
    callbacks=[EarlyStoppingCallback(early_stopping_patience=stop)],
)
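
For completeness, this is how I run training and evaluation; the keys returned by compute_metrics come back with an eval_ prefix:

trainer.train()
metrics = trainer.evaluate()
print(metrics)  # e.g. {'eval_loss': ..., 'eval_f1 유형': ..., 'eval_f1 극성': ..., ...}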