Error: stack expects each tensor to be equal size, but got [24] at entry 0 and [81] at entry 1

I got an error while running my code for a chatbot that uses the pretrained GPT-2 model and tokenizer:

ERROR: stack expects each tensor to be equal size, but got [24] at entry 0 and [81] at entry 1.
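From what I can tell, this is the error torch.stack raises when the tensors passed to it have different sizes (the DataLoader's default collate_fn stacks the samples of each batch). A minimal standalone snippet, not part of my script, with sizes chosen to match the message, reproduces it:

import torch

# torch.stack requires all tensors to have the same shape;
# the DataLoader's default collate_fn calls it when building a batch.
a = torch.zeros(24, dtype=torch.long)  # sample encoded to 24 tokens
b = torch.zeros(81, dtype=torch.long)  # sample encoded to 81 tokens
torch.stack([a, b])  # RuntimeError: stack expects each tensor to be equal size, but got [24] at entry 0 and [81] at entry 1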

Here is my code:
import torch
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from transformers import GPT2LMHeadModel, GPT2Tokenizer, AdamW
class QADataset(Dataset):
    def __init__(self, qa_pairs, tokenizer):
        self.tokenizer = tokenizer
        self.inputs = []
        self.attention_masks = []

        for pair in qa_pairs:
            question = pair["question"]
            answer = pair["answer"]
            encoded = tokenizer.encode_plus(
                question,
                answer,
                add_special_tokens=True,
                padding="longest",
                max_length=512,
                truncation=True,
                return_tensors="pt"
            )

            self.inputs.append(encoded.input_ids.squeeze())
            self.attention_masks.append(encoded.attention_mask.squeeze())

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, idx):
        return {
            "input_ids": self.inputs[idx],
            "attention_mask": self.attention_masks[idx],
        }

# Load pretrained tokenizer and model
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
tokenizer.add_special_tokens({'pad_token': ''})
model_name = "gpt2"
model = GPT2LMHeadModel.from_pretrained(model_name)
model.resize_token_embeddings(len(tokenizer))

# Create an empty dataset to store user questions
user_questions = []

# Train the model with initial labelled data
qa_pairs = [
    {
        "question": "Axpert license will expire on dd/mm/yyyy. Please renew the license before expiry.",
        "answer": "The licence information button will turn RED and display a warning regarding licence expiry 10 days before it expires. This licence expiry notification message and timings can also be modified at the solution level to meet the needs of the customer."
    },
    {
        "question": "Disconnected because you have logged into another session",
        "answer": "Restricted login for the same user per session. i.e If a user logs in a second time without exiting the first session, the first session expires and the following notice is displayed. Keep in mind that a session is the period of time that begins when a user logs in and ends when they log out."
    },
]

# Convert initial labelled data to tensors and create dataset
dataset = QADataset(qa_pairs, tokenizer)

# Split dataset into training and validation sets
train_size = int(0.8 * len(dataset))
val_size = len(dataset) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])

# Create data loaders for training and validation sets
batch_size = 2
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.train()

optimizer = AdamW(model.parameters(), lr=1e-5)
epochs = 5

# Training loop
for epoch in range(epochs):
    model.train()
    total_loss = 0

    for batch in train_dataloader:
        input_ids = batch["input_ids"].to(device)
        attention_mask = batch["attention_mask"].to(device)

        outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=input_ids)
        loss = outputs.loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

    average_loss = total_loss / len(train_dataloader)
    print(f"Epoch {epoch + 1} - Loss: {average_loss}")

    # Evaluation on validation set
    model.eval()
    with torch.no_grad():
        total_val_loss = 0
        for val_batch in val_dataloader:
            val_input_ids = val_batch["input_ids"].to(device)
            val_attention_mask = val_batch["attention_mask"].to(device)

            val_outputs = model(input_ids=val_input_ids, attention_mask=val_attention_mask, labels=val_input_ids)
            val_loss = val_outputs.loss

            total_val_loss += val_loss.item()

        average_val_loss = total_val_loss / len(val_dataloader)
        print(f"Epoch {epoch + 1} - Validation Loss: {average_val_loss}")

    # Prompt the user for new unlabelled questions and add them to the dataset
    user_questions = []  # Reset user_questions for each epoch

    while True:
        user_input = input("Enter your question (or 'q' to quit): ")
        if user_input.lower() == "q":
            break
        user_questions.append(user_input)

        # Convert the user input to tensor
        user_input_ids = tokenizer.encode(user_input, add_special_tokens=True, truncation=True, max_length=512,
                                          padding="longest", return_tensors="pt").to(device)

        # Generate a response for the user input
        generated_output = model.generate(user_input_ids, max_length=50, num_return_sequences=1)
        generated_response = tokenizer.decode(generated_output[0], skip_special_tokens=True)
        print("Generated Response:", generated_response)

    # Update the dataset with the user questions
    updated_user_questions = QADataset(user_questions, tokenizer)
    updated_dataset = ConcatDataset([train_dataset, updated_user_questions])
    train_dataloader = DataLoader(updated_dataset, batch_size=batch_size, shuffle=True)

# Save the final model
torch.save(model.state_dict(), "qa_model.pt")