TrOCR sequence item 26: expected str instance, NoneType found

I am using this notebook, Transformers-Tutorials/Fine_tune_TrOCR_on_IAM_Handwriting_Database_using_Seq2SeqTrainer.ipynb at master · NielsRogge/Transformers-Tutorials · GitHub, to train on my own dataset. The only thing I changed was the dataset path. Partway through training, when the trainer runs an evaluation step, it fails with this error:

sequence item 26: expected str instance, NoneType found

TypeError Traceback (most recent call last)
/tmp/ipykernel_6076/697618435.py in <module>
11 data_collator=default_data_collator,
12 )
---> 13 trainer.train() # "checkpoint-17000"

/usr/local/lib/python3.8/dist-packages/transformers/trainer.py in train(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)
1390 self.control = self.callback_handler.on_step_end(args, self.state, self.control)
1391
---> 1392 self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
1393 else:
1394 self.control = self.callback_handler.on_substep_end(args, self.state, self.control)

/usr/local/lib/python3.8/dist-packages/transformers/trainer.py in _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval)
1512 metrics = None
1513 if self.control.should_evaluate:
---> 1514 metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
1515 self._report_to_hp_search(trial, epoch, metrics)
1516

/usr/local/lib/python3.8/dist-packages/transformers/trainer_seq2seq.py in evaluate(self, eval_dataset, ignore_keys, metric_key_prefix, max_length, num_beams)
68 self._max_length = max_length if max_length is not None else self.args.generation_max_length
69 self._num_beams = num_beams if num_beams is not None else self.args.generation_num_beams
---> 70 return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
71
72 def predict(

/usr/local/lib/python3.8/dist-packages/transformers/trainer.py in evaluate(self, eval_dataset, ignore_keys, metric_key_prefix)
2149
2150 eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
---> 2151 output = eval_loop(
2152 eval_dataloader,
2153 description="Evaluation",

/usr/local/lib/python3.8/dist-packages/transformers/trainer.py in evaluation_loop(self, dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix)
2390 # Metrics!
2391 if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
---> 2392 metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
2393 else:
2394 metrics = {}

/tmp/ipykernel_6076/2700917557.py in compute_metrics(pred)
3 pred_ids = pred.predictions
4
----> 5 pred_str = processor.batch_decode(pred_ids, skip_special_tokens=True)
6 labels_ids[labels_ids == -100] = processor.tokenizer.pad_token_id
7 label_str = processor.batch_decode(labels_ids, skip_special_tokens=True)

/usr/local/lib/python3.8/dist-packages/transformers/models/trocr/processing_trocr.py in batch_decode(self, *args, **kwargs)
123 information.
124 """
---> 125 return self.tokenizer.batch_decode(*args, **kwargs)
126
127 def decode(self, *args, **kwargs):

/usr/local/lib/python3.8/dist-packages/transformers/tokenization_utils_base.py in batch_decode(self, sequences, skip_special_tokens, clean_up_tokenization_spaces, **kwargs)
3188 :obj:`List[str]`: The list of decoded sentences.
3189 """
---> 3190 return [
3191 self.decode(
3192 seq,

/usr/local/lib/python3.8/dist-packages/transformers/tokenization_utils_base.py in <listcomp>(.0)
3189 """
3190 return [
---> 3191 self.decode(
3192 seq,
3193 skip_special_tokens=skip_special_tokens,

/usr/local/lib/python3.8/dist-packages/transformers/tokenization_utils_base.py in decode(self, token_ids, skip_special_tokens, clean_up_tokenization_spaces, **kwargs)
3227 token_ids = to_py_obj(token_ids)
3228
---> 3229 return self._decode(
3230 token_ids=token_ids,
3231 skip_special_tokens=skip_special_tokens,

/usr/local/lib/python3.8/dist-packages/transformers/tokenization_utils.py in _decode(self, token_ids, skip_special_tokens, clean_up_tokenization_spaces, spaces_between_special_tokens, **kwargs)
930 current_sub_text.append(token)
931 if current_sub_text:
---> 932 sub_texts.append(self.convert_tokens_to_string(current_sub_text))
933
934 if spaces_between_special_tokens:

/usr/local/lib/python3.8/dist-packages/transformers/models/gpt2/tokenization_gpt2.py in convert_tokens_to_string(self, tokens)
262 def convert_tokens_to_string(self, tokens):
263 """Converts a sequence of tokens (string) in a single string."""
---> 264 text = "".join(tokens)
265 text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
266 return text

TypeError: sequence item 21: expected str instance, NoneType found
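
From the traceback, batch_decode is receiving token IDs that the tokenizer maps back to None, and "".join(tokens) then fails. Below is a minimal sketch of a guarded compute_metrics I am considering, assuming the None tokens come from -100 label padding or from predicted IDs outside the tokenizer's vocabulary (that second guess, and the clamping to the pad token, are my own assumptions). processor and cer_metric are the names from the tutorial notebook, where cer_metric = load_metric("cer"):

import numpy as np

def compute_metrics(pred):
    labels_ids = pred.label_ids
    pred_ids = pred.predictions

    # -100 is the ignore index used to pad labels; the tokenizer cannot decode
    # it, so map it to the pad token first. Also clamp any predicted ID outside
    # the vocabulary, for which convert_ids_to_tokens would return None
    # (assumption about the cause of the error above).
    pad_id = processor.tokenizer.pad_token_id
    vocab_size = len(processor.tokenizer)
    pred_ids = np.where((pred_ids < 0) | (pred_ids >= vocab_size), pad_id, pred_ids)
    labels_ids = np.where(labels_ids == -100, pad_id, labels_ids)

    pred_str = processor.batch_decode(pred_ids, skip_special_tokens=True)
    label_str = processor.batch_decode(labels_ids, skip_special_tokens=True)

    cer = cer_metric.compute(predictions=pred_str, references=label_str)
    return {"cer": cer}

Is this the right way to handle it, or does the error point to something wrong with my dataset's labels?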