I set a different batch_size, but the data-processing time did not change.

def tokenize_function(batch):
    """Tokenize the "sentence1" field of a batch of examples.

    Relies on the module-level `tokenizer`; truncates inputs to at most
    512 tokens. `batch_size` in `Dataset.map` only controls how many
    examples are handed to this function per call — it does not by itself
    change total tokenization time, since the same number of examples is
    processed overall.
    """
    texts = batch["sentence1"]
    return tokenizer(texts, truncation=True, max_length=512)
# Tokenize in batches of 1024 examples per call to tokenize_function,
# drop the now-redundant raw-text column, and emit PyTorch tensors.
# NOTE(review): a larger batch_size rarely speeds up a fast (Rust-backed)
# tokenizer noticeably; to parallelize, pass num_proc to .map() instead.
tokenized_datasets = (
    raw_datasets
    .map(tokenize_function, batched=True, batch_size=1024)
    .remove_columns(["sentence1"])
)
tokenized_datasets.set_format("torch")