So it is possible that this function has a bug in the version of Transformers you are using. The reason Trainer is unaffected is probably that Trainer defines its own `push_to_hub` rather than inheriting the one from `PushToHubMixin`.
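If you want to check which implementation your environment actually has, something like the following should do it. This is only a sketch I am adding here; the import path for `PushToHubMixin` is the one used in recent releases and may differ in older ones.

```python
# Print the installed version and the signature of the mixin-level push_to_hub
# so it can be compared against the source excerpt below.
import inspect

import transformers
from transformers.utils.hub import PushToHubMixin  # import path in recent releases

print(transformers.__version__)
print(inspect.signature(PushToHubMixin.push_to_hub))
```

For reference, this is the mixin side of the code: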
        # Tail of PushToHubMixin._upload_modified_files (transformers/utils/hub.py):
        # everything is handed to huggingface_hub's create_commit here.
        return create_commit(
            repo_id=repo_id,
            operations=operations,
            commit_message=commit_message,
            commit_description=commit_description,
            token=token,
            create_pr=create_pr,
            revision=revision,
        )

    # Signature of the inherited PushToHubMixin.push_to_hub (truncated):
    def push_to_hub(
        self,
        repo_id: str,
        use_temp_dir: Optional[bool] = None,
        commit_message: Optional[str] = None,
        private: Optional[bool] = None,
        token: Optional[Union[bool, str]] = None,
        max_shard_size: Optional[Union[int, str]] = "5GB",
        create_pr: bool = False,
        safe_serialization: bool = True,
        revision: str = None,
        ...
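Every `PreTrainedModel` (and the tokenizers/processors) picks up `push_to_hub` from this mixin, so a plain model-level push goes through the signature above and ends in that `create_commit` call. A rough usage sketch, with a placeholder repo id:

```python
from transformers import AutoModel

model = AutoModel.from_pretrained("bert-base-uncased")

# Placeholder repo id; revision and create_pr are forwarded down to create_commit
# as shown in the excerpt above.
model.push_to_hub(
    "your-username/your-repo",
    commit_message="test push",
    revision="main",
    create_pr=False,
)
```

And this is the Trainer side, which defines its own push logic: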
        # Tail of Trainer._push_from_checkpoint (transformers/trainer.py): if a push is
        # already in progress, the new upload jobs are queued onto it.
        else:
            self.push_in_progress.jobs.extend(push_jobs)

    def _finish_current_push(self):
        if not hasattr(self, "push_in_progress"):
            return
        if self.push_in_progress is not None and not self.push_in_progress.is_done():
            logger.info("Waiting for the current checkpoint push to be finished, this might take a couple of minutes.")
            self.push_in_progress.wait_until_done()

    # Trainer's own push_to_hub, defined on the class itself rather than inherited (truncated):
    def push_to_hub(
        self,
        commit_message: Optional[str] = "End of training",
        blocking: bool = True,
        token: Optional[str] = None,
        revision: Optional[str] = None,
        **kwargs,
    ) -> str:
        """
        Upload `self.model` and `self.processing_class` to the 🤗 model hub on the repo `self.args.hub_model_id`.
        ...
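So when the push goes through `Trainer`, it is this redefined `push_to_hub` that runs, which fits with it working for you. A minimal sketch of that call, assuming `trainer` is an already configured `transformers.Trainer` with `args.hub_model_id` set:

```python
# Both keyword arguments come from the Trainer.push_to_hub signature quoted above;
# blocking=True waits for the upload to finish before returning.
trainer.push_to_hub(
    commit_message="End of training",
    blocking=True,
)
```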