Trainer() doesn't print any output or eval metrics during training

I am following the notebook to fine-tune RoBERTa on the MLM task:

However, when I run trainer.train(),
the standard output of eval metrics during training is not shown.
Also there is nothing in wandb.
Any ideas?
This is the code:

def tokenize_function(examples):
    """Tokenize the raw "text" column of a batched dataset slice.

    Fixes from the pasted snippet: the body was unindented and used
    typographic quotes (`“text”`), both of which are syntax errors.
    """
    return tokenizer(examples["text"])
def group_texts(examples, block_size=514):
    """Concatenate every field of a batched dataset slice and re-split it
    into fixed-size chunks of ``block_size`` tokens.

    Any tail shorter than ``block_size`` is dropped (standard behavior of
    the HF language-modeling notebooks). ``labels`` start as a copy of
    ``input_ids``; the MLM collator applies masking later.
    """
    # Flatten each field's list-of-lists into one long sequence.
    flattened = {
        key: [token for seq in seqs for token in seq]
        for key, seqs in examples.items()
    }
    # Keep only a whole number of blocks; the remainder is discarded.
    usable = (len(next(iter(flattened.values()))) // block_size) * block_size
    chunked = {
        key: [seq[start : start + block_size] for start in range(0, usable, block_size)]
        for key, seq in flattened.items()
    }
    chunked["labels"] = chunked["input_ids"].copy()
    return chunked

import os

# NOTE: every string below uses plain ASCII quotes -- the forum paste used
# typographic (curly) quotes, which are a SyntaxError in Python.
datasets = load_dataset("wikitext", "wikitext-2-raw-v1")
model_checkpoint = "distilroberta-base"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)
tokenized_datasets = datasets.map(
    tokenize_function, batched=True, num_proc=4, remove_columns=["text"]
)
model = AutoModelForMaskedLM.from_pretrained(model_checkpoint)
# NOTE(review): block_size=514 matches max_position_embeddings for RoBERTa,
# but RoBERTa reserves 2 positions, so 512 usable tokens is the usual
# choice -- confirm 514-token blocks don't overflow the position embeddings.
lm_datasets = tokenized_datasets.map(
    group_texts,
    batched=True,
    batch_size=514,
    num_proc=4,
    load_from_cache_file=False,
)

numBatch = 8
numEpoch = 2
# Step count must come from the *grouped* dataset (grouping changes the row
# count) and account for gradient_accumulation_steps=2; use integer division
# since warmup_steps must be an int.
numStep = lm_datasets["train"].num_rows * numEpoch // (numBatch * 2)
# wandb reads the key from the environment (or `wandb login`); assigning a
# plain Python variable named WANDB_API_KEY does nothing -- this is why no
# run appeared in wandb.
os.environ["WANDB_API_KEY"] = "..."
out_dir = "/mlm1/"
args = TrainingArguments(
    run_name="robert_mlm1",
    num_train_epochs=numEpoch,
    per_device_train_batch_size=numBatch,
    per_device_eval_batch_size=numBatch * 4,
    gradient_accumulation_steps=2,
    learning_rate=2e-5,
    weight_decay=1e-2,
    # warmup_steps takes precedence over warmup_ratio when both are set;
    # warmup_ratio is kept only for documentation of intent.
    warmup_ratio=1e-2,
    warmup_steps=int(numStep / 100),
    save_total_limit=3,
    save_steps=200,  # must be a multiple of eval_steps for load_best_model_at_end
    logging_steps=4,
    eval_steps=4,
    do_eval=True,
    save_strategy="steps",
    evaluation_strategy="steps",
    output_dir=out_dir + "weights",
    logging_dir=out_dir + "logs",
    seed=23,
    report_to="wandb",
    load_best_model_at_end=True,
)

# Without a masking collator the Trainer trains with labels == input_ids
# (no tokens masked), which is not an MLM objective. The collator performs
# the 15% random masking on the fly.
from transformers import DataCollatorForLanguageModeling

data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer, mlm_probability=0.15
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=lm_datasets["train"],
    eval_dataset=lm_datasets["validation"],
    data_collator=data_collator,
)

trainer.train()