FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead

How do I fix this deprecation warning? The full message is:

C:/Users/ai/AppData/Roaming/Python/Python39/site-packages/transformers/optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set no_deprecation_warning=True to disable this warning

import logging

from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from transformers import AdamW

param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']  # note: current checkpoints name these 'LayerNorm.weight'/'LayerNorm.bias' rather than 'gamma'/'beta'
# Apply weight decay to everything except biases and LayerNorm parameters.
# The parameter-group key must be 'weight_decay'; 'weight_decay_rate' is
# silently ignored by both the transformers and the torch AdamW.
optimizer_grouped_parameters = [
    {
        'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay': 0.01,
    },
    {
        'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay': 0.0,
    },
]
optimizer = AdamW(optimizer_grouped_parameters, lr=1e-5)

step = 0
best_acc = 0
num_epochs = 10  # renamed so the loop variable below doesn't shadow it
writer = SummaryWriter(log_dir='model_best')
for epoch in tqdm(range(num_epochs)):
    for idx, batch in tqdm(enumerate(train_loader),
                           total=len(train_texts) // batch_size,
                           leave=False):
        optimizer.zero_grad()
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)
        outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        loss = outputs[0]  # the model returns the loss first when labels are passed
        logging.info(
            f'Epoch-{epoch}, Step-{step}, Loss: {loss.cpu().detach().numpy()}')
        step += 1
        loss.backward()
        optimizer.step()
        writer.add_scalar('train_loss', loss.item(), step)
    logging.info(f'Epoch {epoch}, current best acc: {best_acc}, starting evaluation.')
    accuracy, precision, recall, f1 = eval_model(model, eval_loader)  # evaluate on the dev set
    writer.add_scalar('dev_accuracy', accuracy, step)
    writer.add_scalar('dev_precision', precision, step)
    writer.add_scalar('dev_recall', recall, step)
    writer.add_scalar('dev_f1', f1, step)
    if accuracy > best_acc:
        model.save_pretrained('model_best')  # save the best checkpoint so far
        tokenizer.save_pretrained('model_best')
        best_acc = accuracy
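
(eval_model isn't shown in the post; it just has to return accuracy, precision, recall, and F1 over eval_loader. A minimal sketch of such a helper, assuming single-label classification, the same device variable as above, and sklearn.metrics:)

import torch
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def eval_model(model, eval_loader):
    """Return (accuracy, precision, recall, f1) over eval_loader."""
    model.eval()
    preds, golds = [], []
    with torch.no_grad():
        for batch in eval_loader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            logits = model(input_ids=input_ids, attention_mask=attention_mask).logits
            preds.extend(logits.argmax(dim=-1).cpu().tolist())
            golds.extend(batch['labels'].tolist())
    accuracy = accuracy_score(golds, preds)
    # average='macro' is an assumption; use average='binary' for two-class labels
    precision, recall, f1, _ = precision_recall_fscore_support(
        golds, preds, average='macro', zero_division=0)
    model.train()  # restore training mode before the loop continues
    return accuracy, precision, recall, f1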

Comment out the deprecated import:

# from transformers import AdamW

and change the optimizer to the PyTorch AdamW implementation (import torch first):

import torch

optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=1e-5)
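
Alternatively, if you just want to silence the warning while staying on the transformers class for now, the message itself points to a constructor flag; a minimal sketch:

from transformers import AdamW

# Only suppresses the FutureWarning; the class is still scheduled for removal,
# so switching to torch.optim.AdamW remains the long-term fix.
optimizer = AdamW(optimizer_grouped_parameters, lr=1e-5,
                  no_deprecation_warning=True)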

I got the same warning, but I didn't import AdamW in the first place. Please help me, I'm a beginner.

training_args = TrainingArguments(
    output_dir='/content/drive/MyDrive/models/results',
    num_train_epochs=5,
    per_device_train_batch_size=16,
    save_steps=500,
    logging_steps=100,
    evaluation_strategy='steps',
    eval_steps=500,
    save_total_limit=1
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset={'input_ids': torch.tensor(input_ids, dtype=float),
                   'attention_mask': torch.tensor(attention_mask, dtype=float)},
    eval_dataset={'input_ids': torch.tensor(input_ids_test, dtype=float),
                  'attention_mask': torch.tensor(attention_mask_test, dtype=float)},
    tokenizer=tokenizer
)

trainer.train()
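
You don't need to import AdamW yourself to hit this: the Trainer builds the deprecated transformers AdamW as its default optimizer, and that is what emits the warning. Two ways around it, as a hedged sketch (optim='adamw_torch' needs a reasonably recent transformers release, and train_dataset here stands in for a proper Dataset object):

import torch
from transformers import Trainer, TrainingArguments

# Option 1: newer transformers versions let you pick the optimizer directly.
training_args = TrainingArguments(
    output_dir='/content/drive/MyDrive/models/results',
    optim='adamw_torch',  # use torch.optim.AdamW instead of the deprecated class
)

# Option 2: hand the Trainer a ready-made torch optimizer yourself.
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,   # hypothetical Dataset variable
    optimizers=(optimizer, None),  # (optimizer, lr_scheduler); None keeps the default schedule
)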