I’ve read about other people hitting similar errors, but none of the answers really helped with the one I’m getting now.
When I call:
model = BertWithFeats(bert_config, params)
I get the error above. I don’t understand the reason, since BertForSequenceClassification should accept labels in its arguments. Any help is really appreciated, thanks a lot!
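For reference, this is roughly how I build the two arguments before the call (the real tokenizer/topic values are project-specific, so treat these as placeholders):

from transformers import BertConfig, BertTokenizer

bert_config = BertConfig.from_pretrained('bert-base-uncased', num_labels=2)
params = {
    'tokenizer': BertTokenizer.from_pretrained('bert-base-uncased'),
    'topic': 'some-topic',  # placeholder
}
model = BertWithFeats(bert_config, params)

And here is my model code: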
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers import BertForSequenceClassification
from transformers.modeling_outputs import SequenceClassifierOutput

class BertWithFeats(BertForSequenceClassification):
    def __init__(self, bert_config, params):
        super(BertWithFeats, self).__init__(bert_config)
        self.params = params
        self.tokenizer = params.get('tokenizer')
        self.topic = params.get('topic')
        # Store the number of labels, which tells us whether this is a
        # classification or regression task.
        self.num_labels = bert_config.num_labels
        # Classification head that maps the combined feature vector to logits.
        self.classifier = FeatureAdaptedClassificationHead(bert_config)
        self.config = bert_config
    def forward(self, input_ids, attention_mask=None, token_type_ids=None,
                labels=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # self.bert is the bare BertModel, which does not take labels;
        # the labels are only used further down to compute the loss.
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            return_dict=return_dict,
        )
        combined_feats = aggregate_features(outputs, self.tokenizer, input_ids, self.topic)
        print(combined_feats.shape)  # debug: inspect the combined feature shape
        logits = self.classifier(combined_feats)
        loss = None
        # compute the loss as it is done in the original RobertaForSequenceClassification code
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        # return the result as in the original code: loss, logits, hidden states and attentions
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
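aggregate_features is my own helper and isn't shown here; whatever it computes, it has to return a tensor of shape (batch_size, hidden_size), since the first Linear layer of the head below expects hidden_size inputs. A simplified, purely illustrative stand-in (not my real implementation) would be:

# Hypothetical stand-in for the real aggregate_features helper.
# My real version mixes in tokenizer/topic-based features; this one just
# returns the [CLS] hidden state so the rest of the code can run.
def aggregate_features(outputs, tokenizer, input_ids, topic):
    return outputs.last_hidden_state[:, 0, :]  # shape: (batch_size, hidden_size)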
class FeatureAdaptedClassificationHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
    def forward(self, features, **kwargs):
        x = features
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.tanh(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x
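On its own the head behaves as expected; a quick sanity check with a default config (hypothetical values) prints the right shape:

import torch
from transformers import BertConfig

# The head maps (batch_size, hidden_size) -> (batch_size, num_labels).
config = BertConfig(num_labels=3)
head = FeatureAdaptedClassificationHead(config)
dummy = torch.randn(4, config.hidden_size)
print(head(dummy).shape)  # torch.Size([4, 3])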