Machine Translation using Hugging Face problem

I am trying to create a custom Machine translation model using tensorflow and keras. I am trying to replicate a similar way for Machine Translation as I did you my NLP Classification task

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
def tokenize_func(dataset):
  attention_mask_list = []
  input_id_list = []
  token_id_list = []
  for i in dataset.index:
    encoded = tokenizer(dataset.loc[i]["Title"], max_length=28, padding="max_length",truncation=True)
    input_id_list.append(encoded["input_ids"])
    attention_mask_list.append(encoded["attention_mask"])
    token_id_list.append(encoded["token_type_ids"])
  return(np.array(input_id_list), np.array(token_id_list), np.array(attention_mask_list))


train_input_id, train_token_id, train_attention_mask = tokenize_func(train)
test_input_id, test_token_id, test_attention_mask = tokenize_func(test)
val_input_id, val_token_id, val_attention_mask = tokenize_func(val)
bert_model = TFBertModel.from_pretrained('bert-base-uncased')

##params###
opt = Adam(learning_rate=1e-4, decay=1e-7)
loss = tf.keras.losses.CategoricalCrossentropy()
accuracy = tf.keras.metrics.CategoricalAccuracy()


input_ids = tf.keras.Input(shape=(28,),dtype='int32')
token_ids = tf.keras.Input(shape=(28,),dtype='int32')
attention_masks = tf.keras.Input(shape=(28,),dtype='int32')
embeddings = bert_model([input_ids, token_ids,attention_masks])[1]
dense_layer = tf.keras.layers.Dense(40, activation="relu")(embeddings)
output = tf.keras.layers.Dense(len(label_map), activation="softmax")(dense_layer)
bert_custom = tf.keras.models.Model(inputs = [input_ids, token_ids, attention_masks], outputs = output)
bert_custom.compile(opt, loss=loss, metrics=accuracy)
bert_custom.fit([train_input_id, train_token_id, train_attention_mask], y_train, validation_data=([val_input_id, val_token_id, val_attention_mask], y_val), epochs=15, batch_size=32)

Now what my Machine Translation model looks like

tokenizer = T5TokenizerFast.from_pretrained("t5-small")
translation_model = T5ForConditionalGeneration.from_pretrained("t5-small")
def tokenizing(dataset):
    input_ids_list = []
    attention_mask_list = []
    labels_list = []
    for i in dataset.index:
        encoded = tokenizer(dataset.loc[i]["input"], text_target = dataset.loc[i]["output"], padding="max_length",max_length=128, truncation=True, return_tensors='pt')
        input_ids_list.append(encoded["input_ids"])
        attention_mask_list.append(encoded["attention_mask"])
        labels_list.append(encoded["labels"])
    return(np.array(input_ids_list),np.array(attention_mask_list),np.array(labels_list))

#train_df = tokenizing(dataset.head(8000))
#val_df = tokenizing(dataset.tail(2000))

train_input_ids_list, train_attention_mask_list,  train_labels_list = tokenizing(dataset.head(8000))
val_input_ids_list, val_attention_mask_list,  val_labels_list = tokenizing(dataset.tail(2000))

translation_model([train_input_ids_list, train_attention_mask_list], labels=train_labels_list)

But this is showing error : - 974 elif input_ids is not None:
→ 975 input_shape = input_ids.size()
976 input_ids = input_ids.view(-1, input_shape[-1])
977 elif inputs_embeds is not None:

AttributeError: ‘list’ object has no attribute ‘size’

Please help me out