Alternatively, you can save it just like any other PyTorch model, but use the appropriate file names (and save the model's config as well) so that the checkpoint can later be loaded with the from_pretrained method:
import torch
from pathlib import Path
from transformers import WEIGHTS_NAME, CONFIG_NAME  # "pytorch_model.bin" and "config.json"

if isinstance(model_checkpoint_path, str):
    model_checkpoint_path = Path(model_checkpoint_path)
model_checkpoint_path.mkdir(parents=True, exist_ok=True)  # make sure the output directory exists

# Save the weights and the config under the file names that from_pretrained expects
torch.save(model.state_dict(), model_checkpoint_path / WEIGHTS_NAME)
model.config.to_json_file(model_checkpoint_path / CONFIG_NAME)
# Save the tokenizer's vocabulary files in the same directory
tokenizer.save_vocabulary(str(model_checkpoint_path))
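For reference, the same result can usually be achieved with the built-in save_pretrained helpers, which write the weights, config, and tokenizer files for you; a minimal sketch using the same variables as above:

# Equivalent shortcut with the library's own helpers
model.save_pretrained(model_checkpoint_path)
tokenizer.save_pretrained(model_checkpoint_path)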
Loading the model:
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# num_labels is already stored in the saved config; if passed here, it should match that value
model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint_path, num_labels=num_labels)
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint_path)
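If you want to confirm that the classification head was restored with the expected label setup, you can inspect the loaded config; a quick sanity check (variable names as in the snippet above):

# The restored config should carry the fine-tuning label setup
print(model.config.num_labels)   # should equal num_labels
print(model.config.id2label)     # e.g. {0: 'LABEL_0', 1: 'LABEL_1'} unless custom label names were saved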
Using the model for inference:
inputs = ['example text number 1', 'another example']

# Tokenize as a padded batch of PyTorch tensors so it can be fed directly to the model
enc = tokenizer(inputs, padding=True, truncation=True, return_tensors='pt')

model.eval()
with torch.no_grad():
    output = model(**enc)

# Take the index of the highest logit as the predicted class for each input
_, pred = torch.max(output.logits, dim=1)
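If label names were stored in the config, the predicted indices can be mapped back to them; a minimal sketch, assuming the pred and inputs variables from the snippet above:

# Map predicted class indices back to label names via the config
labels = [model.config.id2label[i] for i in pred.tolist()]
for text, label in zip(inputs, labels):
    print(f'{text!r} -> {label}')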