Error while optimizing seq2seq model using optimum

I could not reproduce this issue with the following environment:
transformers==4.42.4
torch== 2.4.0+cpu
onnx==1.16.2
onnxruntime==1.18.1
optimum==1.21.2

from transformers import AutoTokenizer
from optimum.onnxruntime import  OptimizationConfig, ORTOptimizer, ORTModelForSeq2SeqLM
def main() -> None:
    """Export a seq2seq model to ONNX, apply graph optimizations, and smoke-test it.

    Exports ``sshleifer/distilbart-cnn-12-6`` via ORTModelForSeq2SeqLM, runs
    ORTOptimizer with transformers-specific CPU optimizations (level 2), saves
    the optimized model plus tokenizer to ``distilbart_optimized``, then reloads
    the optimized model and generates from a sample input to verify it works.
    """
    model_id = "sshleifer/distilbart-cnn-12-6"
    save_dir = "distilbart_optimized"

    # Load the PyTorch model and export it to the ONNX format.
    model = ORTModelForSeq2SeqLM.from_pretrained(model_id, export=True)

    # Create the optimizer from the exported model.
    optimizer = ORTOptimizer.from_pretrained(model)

    # Define the optimization strategy: level-2 graph optimizations with
    # transformers-specific fusions, targeting CPU execution.
    optimization_config = OptimizationConfig(
        optimization_level=2,
        enable_transformers_specific_optimizations=True,
        optimize_for_gpu=False,
    )

    # Optimize the model and write the optimized ONNX files to save_dir.
    optimizer.optimize(save_dir=save_dir, optimization_config=optimization_config)

    # Save the tokenizer alongside the optimized model so that save_dir is a
    # self-contained, redistributable model directory.
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.save_pretrained(save_dir)

    # Reload the optimized model and run a quick generation smoke test.
    optimized_model = ORTModelForSeq2SeqLM.from_pretrained(save_dir)
    tokens = tokenizer("This is a sample input", return_tensors="pt")
    outputs = optimized_model.generate(**tokens)
    # Decode and show the result so the smoke test actually reports something.
    print(tokenizer.batch_decode(outputs, skip_special_tokens=True))


# Guard the entry point: without this, merely importing the file would trigger
# the full (expensive) export + optimization pipeline.
if __name__ == "__main__":
    main()

@LearnToGrow Can you try the solution suggested here?