Problem "Target size must be the same as input size "

Hello everyone, Iā€™m encountering an issue with my code for the NLI classification task. I have three labels, and Iā€™m using BERT. When attempting to run my program, I encounter the following error message: ā€œraise ValueError (f"Target size ({target.size()}) must be the same as input size.ā€ Thank you in advance for any assistance. ({input.size()})ā€œ)
ValueError: Target size (torch.Size([1])) must be the same as input size (torch.Size([1, 3]))ā€

-----------------------------------code-------------------------------------------------------------------

-------------Function----------------------

def compute_metrics(eval_pred):
logits = eval_pred.predictions[0]
labels = eval_pred.label_ids
predictions = np.argmax(logits, axis=-1)
return {ā€œf1ā€: f1_score(labels, predictions)}

def tokenize_function(example):
give_premise = 'Premise: ā€™
give_hypothesis = "\nHypothesis: "
give_classification = '\n\nClassification: ā€™
prompt = [give_premise + premise + give_hypothesis + hypothesis + give_classification for premise, hypothesis in zip(example[ā€œpremiseā€], example[ā€œhypothesisā€])]
example[ā€˜input_idsā€™] = tokenizer(prompt, padding=ā€œmax_lengthā€, truncation=True, return_tensors=ā€œptā€).input_ids

return example

#--------------Loading dataset----------------

#utilFunction.balanced_dataset(ā€œā€¦/train.csvā€, 200, ā€œbalanced_train.csvā€)
#utilFunction.balanced_dataset(ā€œā€¦/validation_matched.csvā€,20, ā€œbalanced_validation_matched.csvā€)
#utilFunction.balanced_dataset(ā€œā€¦/validation_mismatched.csvā€,20, ā€œbalanced_validation_mismatched.csvā€)

data_files = {ā€œtrainā€: ā€œbalanced_train.csvā€, ā€œtestā€: ā€œbalanced_validation_matched.csvā€, ā€œvalidationā€: ā€œbalanced_validation_mismatched.csvā€}

#dataset = load_dataset(ā€œcsvā€, data_files=data_files)

df = pd.read_csv(ā€˜balanced_train.csvā€™)
df = df.rename(columns={ā€˜NliClassā€™: ā€˜labelsā€™})
df[ā€˜labelsā€™] = df[ā€˜labelsā€™].astype(int)
dataset = datasets.Dataset.from_pandas(df)
print(dataset)
label2id = {0: ā€˜ENTAILMENTā€™, 1: ā€˜NEUTRALā€™, 2: ā€˜CONTRADICTIONā€™}
id2label = {ā€˜ENTAILMENTā€™: 0, ā€˜NEUTRALā€™: 1, ā€˜CONTRADICTIONā€™: 2}

#--------------Loading Model And Tokenizer----------------

model_name=ā€œbert-base-uncasedā€
original_model = BertForSequenceClassification.from_pretrained(model_name, problem_type=ā€œmulti_label_classificationā€, num_labels=3,
id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True)

print(original_model)
print(original_model.config.id2label)
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenized_datasets = dataset.map(tokenize_function, batched=True)

ls = [ā€˜input_idsā€™, ā€˜labelsā€™]

for var in df:
if var not in ls:
tokenized_datasets = tokenized_datasets.remove_columns(var)

#--------------Print trainable parameter and generate original output----------------
print(utilFunction.print_number_of_trainable_model_parameters(original_model))

#--------------------- DEFINE-PEFT-LORA---------------------------

lora_config = LoraConfig(
task_type=TaskType.SEQ_CLS, r=1, lora_alpha=1, lora_dropout=0.1
)
model = get_peft_model(original_model, lora_config)

metric = evaluate.load(ā€œaccuracyā€)

peft_training_args = TrainingArguments(output_dir=ā€œtest_trainerā€, evaluation_strategy=ā€œepochā€, num_train_epochs=25, per_device_train_batch_size=1)

peft_trainer = Trainer(
model=model,
args=peft_training_args,
train_dataset=tokenized_datasets,
#eval_dataset=tokenized_datasets[ā€œvalidationā€],
compute_metrics=compute_metrics,
)

#--------------------- TRAIN ---------------------------

peft_trainer.train()