TypeError: AggregateScore.__new__() missing 2 required positional arguments when computing ROUGE metric

Hello. I am working on a project and have been following an online tutorial on fine-tuning a T5 model for a text summarization task. I am quite new to this, so I may not fully grasp everything that is going on, but I have run into a problem:

    training_args = TrainingArguments(
        output_dir=self.out_dir,
        num_train_epochs=self.epochs,
        per_device_train_batch_size=self.batch_size,
        per_device_eval_batch_size=self.batch_size,
        warmup_steps=500,
        weight_decay=0.01,
        logging_dir=self.out_dir,
        logging_steps=10,
        eval_strategy='steps',
        eval_steps=200,
        save_strategy='epoch',
        save_total_limit=2,
        report_to='tensorboard',
        learning_rate=self.learning_rate,
        dataloader_num_workers=2,
        fp16=True
    )

    metrics = Metrics(self.tokenizer)

    trainer = Trainer(
        model=self.model,
        args=training_args,
        train_dataset=self.dataset_train,
        eval_dataset=self.dataset_valid,
        preprocess_logits_for_metrics=metrics.preprocess_logits_for_metrics,
        compute_metrics=metrics.compute_metrics
    )

    trainer.train()
    return trainer

With the trainer set up like this, I am trying to compute the ROUGE metric on the evaluation set:

    def compute_metrics(self, eval_pred):
        predictions, labels = eval_pred
        predictions = predictions[0]  # In case of tuple output from the model

        decoded_preds = self.tokenizer.batch_decode(predictions, skip_special_tokens=True)
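        # Replace -100 (the ignore index for padded label positions) with the
        # pad token id so the labels can be decoded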
        labels = np.where(labels != -100, labels, self.tokenizer.pad_token_id)
        decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True)

        result = self.rouge.compute(
            predictions=decoded_preds,
            references=decoded_labels,
            use_stemmer=True,
            rouge_types=['rouge1', 'rouge2', 'rougeL']
        )

        prediction_lens = [np.count_nonzero(pred != self.tokenizer.pad_token_id) for pred in predictions]
        result["gen_len"] = np.mean(prediction_lens)

        return {k: v for k, v in result.items()}

    def preprocess_logits_for_metrics(self, logits, labels):
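        # logits may be a tuple of model outputs; logits[0] holds the token logits.
        # Reducing them to predicted ids here keeps evaluation from storing full logits.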
        pred_ids = torch.argmax(logits[0], dim=-1)
        return pred_ids, labels
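
For context, here is a minimal sketch of how `Metrics` would be set up, assuming the ROUGE metric is loaded with `datasets.load_metric` (which is what produces the `AggregateScore` objects named in the error):

    import numpy as np
    import torch
    from datasets import load_metric

    class Metrics:
        def __init__(self, tokenizer):
            # Keep the tokenizer for decoding and load the ROUGE metric
            # used in compute_metrics above
            self.tokenizer = tokenizer
            self.rouge = load_metric('rouge')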

I am getting `TypeError: AggregateScore.__new__() missing 2 required positional arguments: 'mid' and 'high'` when trying to return from the compute_metrics function, more specifically on `return {k: v for k, v in result.items()}`.
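
From what I can tell, this ROUGE metric returns `AggregateScore` namedtuples from the `rouge_score` package (low/mid/high confidence intervals) rather than plain floats, so `result` presumably holds something like this sketch (the numbers are made up for illustration):

    # Illustrative sketch of what `result` apparently contains: rouge_score
    # namedtuples rather than plain floats (the numbers below are made up)
    from rouge_score.scoring import AggregateScore, Score

    result = {
        'rouge1': AggregateScore(
            low=Score(precision=0.41, recall=0.38, fmeasure=0.39),
            mid=Score(precision=0.44, recall=0.41, fmeasure=0.42),
            high=Score(precision=0.47, recall=0.44, fmeasure=0.45),
        ),
        # 'rouge2' and 'rougeL' have the same shape
        'gen_len': 58.3,
    }

If that is right, the last frame of the traceback below would explain the error: `type(metrics)(denumpify_detensorize(m) for m in metrics)` rebuilds the namedtuple by passing a single generator as its first positional argument, which leaves `mid` and `high` unfilled.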

What could be causing this and how do I solve it?

Full traceback:

 File "", line 17, in <module>
    trainer = finetuner.train()
  File "", line 86, in train
    trainer.train()
  File "", line 1885, in train
    return inner_training_loop(
  File "", line 2291, in _inner_training_loop
    self._maybe_log_save_evaluate(tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval)
  File "", line 2721, in _maybe_log_save_evaluate
    metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
  File "", line 3572, in evaluate
    output = eval_loop(
  File "", line 3859, in evaluation_loop
    metrics = denumpify_detensorize(metrics)
  File "\venv\lib\site-packages\transformers\trainer_utils.py", line 714, in denumpify_detensorize
    return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()})
  File "\venv\lib\site-packages\transformers\trainer_utils.py", line 714, in <dictcomp>
    return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()})
  File "\venv\lib\site-packages\transformers\trainer_utils.py", line 712, in denumpify_detensorize
    return type(metrics)(denumpify_detensorize(m) for m in metrics)
TypeError: AggregateScore.__new__() missing 2 required positional arguments: 'mid' and 'high'