The model produces the same summary for every input after training

I fine-tuned a pretrained summarization model on my own dataset.
During training, the ROUGE score is 0.000 at every epoch!
After training, the model generates the same summary for every sample; in other words, it predicts the identical sentence for any article or text I ask it to summarize (see the generation check after the training code below).
This is my code:

from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq
import evaluate

rouge = evaluate.load("rouge")

model_checkpoint = "any-checkpoint"

tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
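
# Not shown above: roughly how tokenized_datasets is built from my own data.
# The file names, column names ("text", "summary"), and max lengths here are
# only illustrative placeholders, not my exact preprocessing.
from datasets import load_dataset

raw_datasets = load_dataset("csv", data_files={"train": "train.csv", "test": "test.csv"})

def preprocess(examples):
    model_inputs = tokenizer(examples["text"], max_length=1024, truncation=True)
    labels = tokenizer(text_target=examples["summary"], max_length=128, truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

tokenized_datasets = raw_datasets.map(
    preprocess, batched=True, remove_columns=raw_datasets["train"].column_names
)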

from transformers import Seq2SeqTrainingArguments, Seq2SeqTrainer
batch_size = 16
num_train_epochs = 2

logging_steps = len(tokenized_datasets["train"]) // batch_size
output_dir = "output_dir "

args = Seq2SeqTrainingArguments(
    output_dir=output_dir,
    overwrite_output_dir=True,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    learning_rate=0.01,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    weight_decay=0.01,
    save_total_limit=1,
    num_train_epochs=num_train_epochs,
    predict_with_generate=True,
    logging_steps=logging_steps,
    load_best_model_at_end=True,
    lr_scheduler_type="reduce_lr_on_plateau",
    # resume_from_checkpoint=True,
    # push_to_hub=True
)
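
# Also not shown above: my compute_metric function. This is roughly what it
# does (a sketch following the standard ROUGE setup, not my exact code).
import numpy as np

def compute_metric(eval_pred):
    predictions, labels = eval_pred
    decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
    # The data collator pads labels with -100; restore the pad token id before decoding
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
    result = rouge.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
    return {key: round(value, 4) for key, value in result.items()}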

trainer = Seq2SeqTrainer(
    model,
    args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
    data_collator=data_collator,
    tokenizer=tokenizer,
    compute_metrics=compute_metric,
)

trainer.train()

trainer.evaluate()
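
This is roughly how I check the model after training (the article strings below are just placeholders); every call returns exactly the same sentence:

from transformers import pipeline

summarizer = pipeline("summarization", model=model, tokenizer=tokenizer)
print(summarizer("First article text ...")[0]["summary_text"])
print(summarizer("A completely different article ...")[0]["summary_text"])
# Both calls print the identical summary, no matter what the input is.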