TimeoutError [100060]?

Updated it and still had that git problem; now downloading the dataset via git clone.

I wonder if the structure of my dataset is too rough, or if the mask isn’t quite binary, so it’s still too big for the usual internet bandwidth. Let me check that too.
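For reference, here’s a minimal sketch of that check, assuming the masks are single-channel PNGs in a local masks/ folder (the path and extension are placeholders for my setup):

import numpy as np
from pathlib import Path
from PIL import Image

# Sketch: verify that every mask is strictly binary (values 0 and 1 only).
# "masks/" and the .png extension are assumptions about my local layout.
for mask_path in Path("masks").glob("*.png"):
    values = np.unique(np.array(Image.open(mask_path)))
    if not set(values.tolist()) <= {0, 1}:
        print(f"{mask_path.name}: unexpected values {values}")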


Oh, there’s a possibility that there are bugs in the less-used functions in the datasets library, or that the datasets aren’t in the expected order…
The HF library tends to rely on the directory structure and file names.
Well, if you can download it with git, then there shouldn’t be a problem with network speed or quality.
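If ordering is the issue, one pattern that sidesteps it is building the dataset explicitly from paired, sorted file lists instead of relying on the folder layout. A rough sketch, with placeholder directories and column names:

from pathlib import Path
from datasets import Dataset, Image

# Sketch: pair images and masks explicitly so nothing depends on how the
# Hub orders or names the files. "images/" and "masks/" are placeholders.
image_paths = sorted(str(p) for p in Path("images").glob("*.png"))
mask_paths = sorted(str(p) for p in Path("masks").glob("*.png"))
assert len(image_paths) == len(mask_paths)

ds = Dataset.from_dict({"image": image_paths, "label": mask_paths})
ds = ds.cast_column("image", Image()).cast_column("label", Image())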

It looks like you’re facing a connection timeout. Since you’ve checked your internet and reset the token, try verifying the server status or switching networks. Also, check if firewalls or antivirus software are blocking the connection. If the issue continues, the server might be temporarily down.
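If it keeps happening, one thing worth trying from Python is raising the Hub timeouts and checking whether the repo is reachable at all. A rough sketch; the repo id is a placeholder:

import os

# Sketch: raise the Hub HTTP timeouts *before* importing huggingface_hub,
# then confirm the dataset repo responds. "username/my-dataset" is a placeholder.
os.environ["HF_HUB_ETAG_TIMEOUT"] = "60"       # metadata requests
os.environ["HF_HUB_DOWNLOAD_TIMEOUT"] = "60"   # file downloads

from huggingface_hub import HfApi

info = HfApi().dataset_info("username/my-dataset")
print(info.id, info.last_modified)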


@John6666 @DylanAndrew Good news: the training section can run a bit after reinstalling Miniconda and cleaning some things like the .cache folder. I also updated everything and am now connected via an Ethernet cable.

The error now is that something is out of bounds. I don’t know if it’s the dataset, but I made sure the images and masks align and have the same count before converting them to a dataset.

Here’s the error:


{
	"name": "IndexError",
	"message": "Target 2 is out of bounds.",
	"stack": "---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
Cell In[18], line 12
      2 from transformers import Trainer
      4 trainer = Trainer(
      5     model=model,
      6     args=training_args,
   (...)
      9     compute_metrics=compute_metrics,
     10 )
---> 12 trainer.train()

File c:\\Users\\Lenovo\\miniconda3\\envs\\HUGGINGFACE-PRETRAIN\\Lib\\site-packages\\transformers\\trainer.py:2114, in Trainer.train(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)
   2111 try:
   2112     # Disable progress bars when uploading models during checkpoints to avoid polluting stdout
   2113     hf_hub_utils.disable_progress_bars()
-> 2114     return inner_training_loop(
   2115         args=args,
   2116         resume_from_checkpoint=resume_from_checkpoint,
   2117         trial=trial,
   2118         ignore_keys_for_eval=ignore_keys_for_eval,
   2119     )
   2120 finally:
   2121     hf_hub_utils.enable_progress_bars()

File c:\\Users\\Lenovo\\miniconda3\\envs\\HUGGINGFACE-PRETRAIN\\Lib\\site-packages\\transformers\\trainer.py:2481, in Trainer._inner_training_loop(self, batch_size, args, resume_from_checkpoint, trial, ignore_keys_for_eval)
   2475 context = (
   2476     functools.partial(self.accelerator.no_sync, model=model)
   2477     if i != len(batch_samples) - 1
   2478     else contextlib.nullcontext
   2479 )
   2480 with context():
-> 2481     tr_loss_step = self.training_step(model, inputs, num_items_in_batch)
   2483 if (
   2484     args.logging_nan_inf_filter
   2485     and not is_torch_xla_available()
   2486     and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
   2487 ):
   2488     # if loss is nan or inf simply add the average of previous logged losses
   2489     tr_loss = tr_loss + tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)

File c:\\Users\\Lenovo\\miniconda3\\envs\\HUGGINGFACE-PRETRAIN\\Lib\\site-packages\\transformers\\trainer.py:3579, in Trainer.training_step(self, model, inputs, num_items_in_batch)
   3576     return loss_mb.reduce_mean().detach().to(self.args.device)
   3578 with self.compute_loss_context_manager():
-> 3579     loss = self.compute_loss(model, inputs, num_items_in_batch=num_items_in_batch)
   3581 del inputs
   3582 if (
   3583     self.args.torch_empty_cache_steps is not None
   3584     and self.state.global_step % self.args.torch_empty_cache_steps == 0
   3585 ):

File c:\\Users\\Lenovo\\miniconda3\\envs\\HUGGINGFACE-PRETRAIN\\Lib\\site-packages\\transformers\\trainer.py:3633, in Trainer.compute_loss(self, model, inputs, return_outputs, num_items_in_batch)
   3631         loss_kwargs[\"num_items_in_batch\"] = num_items_in_batch
   3632     inputs = {**inputs, **loss_kwargs}
-> 3633 outputs = model(**inputs)
   3634 # Save past state if it exists
   3635 # TODO: this needs to be fixed and made cleaner later.
   3636 if self.args.past_index >= 0:

File c:\\Users\\Lenovo\\miniconda3\\envs\\HUGGINGFACE-PRETRAIN\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1736, in Module._wrapped_call_impl(self, *args, **kwargs)
   1734     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1735 else:
-> 1736     return self._call_impl(*args, **kwargs)

File c:\\Users\\Lenovo\\miniconda3\\envs\\HUGGINGFACE-PRETRAIN\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1747, in Module._call_impl(self, *args, **kwargs)
   1742 # If we don't have any hooks, we want to skip the rest of the logic in
   1743 # this function, and just call forward.
   1744 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1745         or _global_backward_pre_hooks or _global_backward_hooks
   1746         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1747     return forward_call(*args, **kwargs)
   1749 result = None
   1750 called_always_called_hooks = set()

File c:\\Users\\Lenovo\\miniconda3\\envs\\HUGGINGFACE-PRETRAIN\\Lib\\site-packages\\transformers\\models\\segformer\\modeling_segformer.py:809, in SegformerForSemanticSegmentation.forward(self, pixel_values, labels, output_attentions, output_hidden_states, return_dict)
    807 if self.config.num_labels > 1:
    808     loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
--> 809     loss = loss_fct(upsampled_logits, labels)
    810 elif self.config.num_labels == 1:
    811     valid_mask = ((labels >= 0) & (labels != self.config.semantic_loss_ignore_index)).float()

File c:\\Users\\Lenovo\\miniconda3\\envs\\HUGGINGFACE-PRETRAIN\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1736, in Module._wrapped_call_impl(self, *args, **kwargs)
   1734     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1735 else:
-> 1736     return self._call_impl(*args, **kwargs)

File c:\\Users\\Lenovo\\miniconda3\\envs\\HUGGINGFACE-PRETRAIN\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1747, in Module._call_impl(self, *args, **kwargs)
   1742 # If we don't have any hooks, we want to skip the rest of the logic in
   1743 # this function, and just call forward.
   1744 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1745         or _global_backward_pre_hooks or _global_backward_hooks
   1746         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1747     return forward_call(*args, **kwargs)
   1749 result = None
   1750 called_always_called_hooks = set()

File c:\\Users\\Lenovo\\miniconda3\\envs\\HUGGINGFACE-PRETRAIN\\Lib\\site-packages\\torch\\nn\\modules\\loss.py:1293, in CrossEntropyLoss.forward(self, input, target)
   1292 def forward(self, input: Tensor, target: Tensor) -> Tensor:
-> 1293     return F.cross_entropy(
   1294         input,
   1295         target,
   1296         weight=self.weight,
   1297         ignore_index=self.ignore_index,
   1298         reduction=self.reduction,
   1299         label_smoothing=self.label_smoothing,
   1300     )

File c:\\Users\\Lenovo\\miniconda3\\envs\\HUGGINGFACE-PRETRAIN\\Lib\\site-packages\\torch\\nn\\functional.py:3479, in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
   3477 if size_average is not None or reduce is not None:
   3478     reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 3479 return torch._C._nn.cross_entropy_loss(
   3480     input,
   3481     target,
   3482     weight,
   3483     _Reduction.get_enum(reduction),
   3484     ignore_index,
   3485     label_smoothing,
   3486 )

IndexError: Target 2 is out of bounds."
}

I also made sure they are the same size, because the masks are created from the cropped dataset. Should I compress the images to make things lighter?
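For reference, here is a minimal check I’m running to compare the label ids that actually appear in the masks against the model’s configured number of classes (assuming the dataset column is named "label" and my objects are called train_ds and model):

import numpy as np

# Sketch: CrossEntropyLoss raises "Target N is out of bounds" whenever a mask
# contains an id >= model.config.num_labels (other than the ignore_index).
# "train_ds", "label", and "model" are assumed names from my notebook.
seen = set()
for example in train_ds.select(range(min(100, len(train_ds)))):
    seen.update(np.unique(np.array(example["label"])).tolist())

print("label values seen:", sorted(seen))
print("model.config.num_labels:", model.config.num_labels)
print("ignore_index:", model.config.semantic_loss_ignore_index)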


I found it.