Dynamic padding not working for custom audio dataset

Following up on this post, since no solution has been posted yet.

My code is very similar to the code in that post:

def map_speech_to_array(batch):
    """
    map the wav file to audio signals

    :param batch: the loaded dataset, with audio file location as "column"
    :type batch: datasets.dataset_dict.DatasetDict
    """
    speech_array, sampling_rate = sf.read(batch["audio_loc"])
    batch["speech"] = speech_array
    batch["sampling_rate"] = sampling_rate
    batch["audio_loc"] = batch["audio_loc"]
    batch["text"] = batch["text"]
    return batch


def prepare_dataset(batch):
    """
    data preprocess with Wav2Vec customized processor

    :param batch: the loaded dataset
    :type batch: datasets.dataset_dict.DatasetDict
    :param processor: the customized 
    :type processor: transformers.models.wav2vec2.processing_wav2vec2.Wav2Vec2Processor
    """
    batch["input_values"] = processor(batch["speech"], sampling_rate=batch["sampling_rate"]).input_values
    with processor.as_target_processor():
        labels = processor(batch["text"]).input_ids
        batch["labels"] = labels
    return batch
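
For reference, the two functions above are applied to the train/test splits with Dataset.map, roughly like this (the split names are the ones passed to the Trainer further down):

# Same pattern as the investigation snippet near the end of this post.
adr_con_train = adr_con_train.map(map_speech_to_array)
adr_con_train = adr_con_train.map(prepare_dataset)
adr_con_test = adr_con_test.map(map_speech_to_array)
adr_con_test = adr_con_test.map(prepare_dataset)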

I also have this class, borrowed from the Hugging Face blog post on fine-tuning Wav2Vec2:

@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor (:class:`~transformers.Wav2Vec2Processor`)
            The processor used for processing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (:obj:`int`, `optional`):
            Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
        max_length_labels (:obj:`int`, `optional`):
            Maximum length of the ``labels`` returned list and optionally padding length (see above).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                max_length=self.max_length_labels,
                pad_to_multiple_of=self.pad_to_multiple_of_labels,
                return_tensors="pt",
            )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
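
To sanity-check the collator in isolation, something along these lines can be run on two made-up features of different lengths (a minimal sketch with dummy values, assuming the processor is configured correctly):

# Minimal standalone check of the collator with two fake examples of
# different lengths. Each feature carries a flat list of floats under
# "input_values" and a flat list of token ids under "labels".
dummy_features = [
    {"input_values": [0.1] * 16000, "labels": [5, 6, 7]},
    {"input_values": [0.2] * 8000, "labels": [8, 9]},
]
collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
out = collator(dummy_features)
print(out["input_values"].shape)  # expected: torch.Size([2, 16000])
print(out["labels"])              # padded positions should be -100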

However, the functions above did not work on my custom dataset with the following Trainer setup:

model = Wav2Vec2ForCTC.from_pretrained(
    "facebook/wav2vec2-base",
    ctc_loss_reduction="mean",
    pad_token_id=processor.tokenizer.pad_token_id)
model.freeze_feature_extractor()
data_collator = DataCollatorCTCWithPadding(
    processor=processor, padding=True)
training_args = TrainingArguments(
    output_dir="../fine-tuned/wav2vec",
    group_by_length=True,
    per_device_train_batch_size=32,
    evaluation_strategy="steps",
    num_train_epochs=30,
    fp16=True,
    gradient_checkpointing=True, 
    save_steps=500,
    eval_steps=500,
    logging_steps=500,
    learning_rate=1e-4,
    weight_decay=0.005,
    warmup_steps=1000,
    save_total_limit=2,
    push_to_hub=False)
trainer = Trainer(
    model=model,
    data_collator=data_collator,
    args=training_args,
    compute_metrics=compute_metrics,
    train_dataset=adr_con_train,
    eval_dataset=adr_con_test,
    tokenizer=processor.feature_extractor)

I used the following code to investigate:

train_dt = train_dt.map(map_speech_to_array)
train_dt = train_dt.map(prepare_dataset)

input_features = []
label_features = []
for i, item in enumerate(train_dt):
    input_features.append({"input_values": item["input_values"]})
    label_features.append({"input_ids": item["labels"]})
print(len(label_features[0]["input_ids"]))

batch = processor.pad(
    input_features,
    padding=True,
    return_tensors="pt")
print(batch)

with processor.as_target_processor():
    labels_batch = processor.pad(
        label_features,
        padding=True,
        return_tensors="pt")

But I got the following error during padding:

ValueError: could not broadcast input array from shape (847204,) into shape (1,)

During handling of the above exception, another exception occurred:
....
ValueError: Unable to create tensor, you should probably activate padding with 'padding=True' to have batched tensors with the same length.
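
To see where the shape mismatch comes from, it helps to inspect a single mapped example (a quick diagnostic sketch; the exact lengths depend on the audio):

import numpy as np

# With the original prepare_dataset above, "input_values" appears to be
# nested one level too deep, because the processor adds a batch dimension
# even for a single utterance.
example = train_dt[0]
print(np.shape(example["input_values"]))  # e.g. (1, 847204) instead of (847204,)
print(np.shape(example["labels"]))        # flat list of label ids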

I noticed the array shape differences mentioned in the very first post, so I changed the prepare_dataset function to the following:

def prepare_dataset(batch):
    """
    data preprocess with Wav2Vec customized processor

    :param batch: the loaded dataset
    :type batch: datasets.dataset_dict.DatasetDict
    :param processor: the customized 
    :type processor: transformers.models.wav2vec2.processing_wav2vec2.Wav2Vec2Processor
    """
    batch["input_values"] = processor(
        batch["speech"], sampling_rate=batch["sampling_rate"]).input_values[0]
    with processor.as_target_processor():
        labels = processor(batch["text"]).input_ids
        batch["labels"] = labels
    return batch

Then another error was thrown when padding the labels:

 if padding_strategy != PaddingStrategy.DO_NOT_PAD and (not self.pad_token or self.pad_token_id < 0):
TypeError: '<' not supported between instances of 'NoneType' and 'int'

Any suggestions on how to solve this issue? Thanks in advance.