from transformers import Trainer, TrainingArguments

training_args = TrainingArguments(
    output_dir='./results',              # output directory
    num_train_epochs=3,                  # total number of training epochs
    per_device_train_batch_size=16,      # batch size per device during training
    per_device_eval_batch_size=16,       # batch size per device during evaluation
    logging_dir='./logs',                # directory for storing logs
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
    tokenizer=tokenizer,
)
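For context, model, tokenizer, and tokenized_datasets are not defined in the snippet above. A minimal sketch of how they might be prepared; the checkpoint name and dataset ("bert-base-uncased", "imdb") are placeholders, not taken from the original post:

# Minimal setup sketch, assuming a sequence-classification task.
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer

checkpoint = "bert-base-uncased"   # assumed checkpoint; swap in your own
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

raw_datasets = load_dataset("imdb")   # assumed dataset; swap in your own

def tokenize(batch):
    # Tokenize the text column; pad/truncate so examples can be batched.
    return tokenizer(batch["text"], truncation=True, padding="max_length")

tokenized_datasets = raw_datasets.map(tokenize, batched=True)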
Error Log:
/usr/local/lib/python3.6/dist-packages/transformers/trainer.py in train(self, model_path, trial)
745 tr_loss += self.training_step(model, inputs)
746 else:
-> 747 tr_loss += self.training_step(model, inputs)
748 self._total_flos += self.floating_point_ops(inputs)
749
/usr/local/lib/python3.6/dist-packages/transformers/trainer.py in training_step(self, model, inputs)
1073 loss = self.compute_loss(model, inputs)
1074 else:
-> 1075 loss = self.compute_loss(model, inputs)
1076
1077 if self.args.n_gpu > 1:
/usr/local/lib/python3.6/dist-packages/transformers/trainer.py in compute_loss(self, model, inputs)
1103 self._past = outputs[self.args.past_index]
1104 # We don't use .loss here since the model may return tuples instead of ModelOutput.
-> 1105 return outputs["loss"] if isinstance(outputs, dict) else outputs[0]
1106
1107 def is_local_process_zero(self) -> bool:
/usr/local/lib/python3.6/dist-packages/transformers/file_utils.py in __getitem__(self, k)
1356 if isinstance(k, str):
1357 inner_dict = {k: v for (k, v) in self.items()}
-> 1358 return inner_dict[k]
1359 else:
1360 return self.to_tuple()[k]
KeyError: 'loss'
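The KeyError: 'loss' means the model's forward pass returned an output with no loss entry, which happens when no labels reach the model. Two common causes: the label column in the tokenized dataset is not named "labels" (or "label", which the default data collator renames), so Trainer's remove_unused_columns step silently drops it; or the model was loaded with a bare AutoModel, which never computes a loss even when labels are present. A sketch of both fixes; the column name "target" is an assumed placeholder for whatever the dataset actually uses, and checkpoint refers to the variable from the setup sketch above:

# Fix 1 (misnamed label column): rename it so Trainer keeps it and
# passes it to the model's forward pass. "target" is a placeholder.
tokenized_datasets = tokenized_datasets.rename_column("target", "labels")

# Fix 2 (no task head): load a model class that computes a loss when
# labels are supplied, instead of a bare AutoModel.
from transformers import AutoModelForSequenceClassification
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)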