Error Training Vision Encoder Decoder for Image Captioning

I am trying to train a VisionEncoderDecoder model with a ViT encoder and a Hindi GPT-2 decoder (surajp/gpt2-hindi) for Hindi image captioning, which my team is working on as part of the Hugging Face course project.

Currently, my code looks like this.

For creating the dataset:

import torch
from torch.utils.data import Dataset
from PIL import Image

class Image_Caption_Dataset(Dataset):
    def __init__(self, root_dir, df, feature_extractor, tokenizer, max_target_length=128):
        self.root_dir = root_dir
        self.df = df
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer
        self.max_length = max_target_length

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, idx):
        # get the image path and caption for this index
        image_path = self.df['images'][idx]
        text = self.df['text'][idx]
        # prepare the image
        image = Image.open(self.root_dir + '/' + image_path).convert("RGB")
        pixel_values = self.feature_extractor(image, return_tensors="pt").pixel_values
        # encode the caption
        captions = self.tokenizer(text,
                                  padding='max_length',
                                  truncation=True,
                                  max_length=self.max_length).input_ids
        # replace padding token ids with -100 so they are ignored by the loss
        captions = [caption if caption != self.tokenizer.pad_token_id else -100 for caption in captions]
        encoding = {"pixel_values": pixel_values.squeeze(), "labels": torch.tensor(captions)}
        return encoding
from transformers import ViTFeatureExtractor, AutoTokenizer

encoder_checkpoint = 'google/vit-base-patch16-224'
decoder_checkpoint = 'surajp/gpt2-hindi'

feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint)
tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)

root_dir = "../input/flickr8k/Images"


train_dataset = Image_Caption_Dataset(root_dir=root_dir,
                                      df=train_df,
                                      feature_extractor=feature_extractor,
                                      tokenizer=tokenizer)
val_dataset = Image_Caption_Dataset(root_dir=root_dir,
                                    df=test_df,
                                    feature_extractor=feature_extractor,
                                    tokenizer=tokenizer)
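
A quick sanity check on a single example (illustrative only; the expected shapes assume ViT-base at 224x224 and max_target_length=128) looks like this:

# Illustrative check of one training example
sample = train_dataset[0]
print(sample["pixel_values"].shape)  # expected: torch.Size([3, 224, 224])
print(sample["labels"].shape)        # expected: torch.Size([128]), with pad positions set to -100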

from transformers import VisionEncoderDecoderModel
# initialize a vision-encoder-decoder model from a pretrained ViT encoder and a pretrained GPT-2 decoder. Note that the cross-attention layers will be randomly initialized
model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_checkpoint, decoder_checkpoint)
#model.to(device)

After initializing the model, I configured the model arguments:


# set special tokens used for creating the decoder_input_ids from the labels
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.pad_token_id = tokenizer.pad_token_id
# make sure vocab size is set correctly
model.config.vocab_size = model.config.decoder.vocab_size

# set beam search parameters
model.config.eos_token_id = tokenizer.sep_token_id
model.config.max_length = 128
model.config.early_stopping = True
model.config.no_repeat_ngram_size = 3
model.config.length_penalty = 2.0
model.config.num_beams = 4
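
As a quick sanity check (an illustrative sketch, not part of the original script), the token IDs wired into the config can be printed to make sure none of them is None:

# Illustrative sanity check: all of these should be integers, not None
print("decoder_start_token_id:", model.config.decoder_start_token_id)
print("pad_token_id:", model.config.pad_token_id)
print("eos_token_id:", model.config.eos_token_id)
print("vocab_size:", model.config.vocab_size)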

Then I started training:


from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    predict_with_generate=True,
    evaluation_strategy="steps",
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    fp16=True, 
    output_dir="./",
    logging_steps=2,
    save_steps=1000,
    eval_steps=200,
)
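
The trainer below also expects a compute_metrics function, which is not shown here. A minimal sketch, assuming a sacreBLEU metric loaded via the datasets library (any captioning metric would do):

import numpy as np
from datasets import load_metric

bleu_metric = load_metric("sacrebleu")

def compute_metrics(eval_preds):
    preds, labels = eval_preds.predictions, eval_preds.label_ids
    if isinstance(preds, tuple):
        preds = preds[0]
    # -100 cannot be decoded, so put the pad token back before decoding
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
    result = bleu_metric.compute(predictions=decoded_preds,
                                 references=[[label] for label in decoded_labels])
    return {"bleu": result["score"]}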

from transformers import default_data_collator

# instantiate trainer
trainer = Seq2SeqTrainer(
    model=model,
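    # the feature extractor is passed as `tokenizer` so the Trainer saves it with the checkpoints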
    tokenizer=feature_extractor,
    args=training_args,
    compute_metrics=compute_metrics,
    train_dataset=train_dataset,
    eval_dataset=val_dataset,
    data_collator=default_data_collator,
)
trainer.train()

But then I got the error: ValueError: Make sure to set the decoder_start_token_id attribute of the model's configuration.

If you look at the code above, I had already set this, but when I checked, tokenizer.cls_token_id was None, so I manually set tokenizer.cls_token_id = '', and then I got the "index out of range in self" error.

Is there any workaround for this? The code is inspired by the notebook here, written by @nielsr.

I have also tried a custom training loop and get the same error.


Hi,

You were already on the right track! The only "mistake" I see here is that GPT-2 doesn't have a CLS token. The CLS token is only defined for encoder-only Transformers such as BERT and RoBERTa. So in this case, the decoder start token can be set to the BOS (beginning of sequence) token:

model.config.decoder_start_token_id = tokenizer.bos_token_id
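
If the tokenizer does not define a BOS token either, you would first have to add one. A rough sketch (assuming "<s>" as the new token; note that adding a token also grows the vocabulary):

# Only needed if tokenizer.bos_token_id is None (rough sketch)
if tokenizer.bos_token_id is None:
    tokenizer.add_special_tokens({"bos_token": "<s>"})
model.config.decoder_start_token_id = tokenizer.bos_token_id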


Thank you, I will try this out


I've updated my previous comment. It might make more sense to use the BOS token instead of the padding token as the decoder start token.

This was also done for this demo.


But now I get the index error:


     11     data_collator=default_data_collator,
     12 )
---> 13 trainer.train()

/opt/conda/lib/python3.7/site-packages/transformers/trainer.py in train(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)
   1315                     tr_loss_step = self.training_step(model, inputs)
   1316                 else:
-> 1317                     tr_loss_step = self.training_step(model, inputs)
   1318
   1319                 if (

/opt/conda/lib/python3.7/site-packages/transformers/trainer.py in training_step(self, model, inputs)
   1855             loss = self.compute_loss(model, inputs)
   1856         else:
-> 1857             loss = self.compute_loss(model, inputs)
   1858
   1859         if self.args.n_gpu > 1:

/opt/conda/lib/python3.7/site-packages/transformers/trainer.py in compute_loss(self, model, inputs, return_outputs)
   1887         else:
   1888             labels = None
-> 1889         outputs = model(**inputs)
   1890         # Save past state if it exists
   1891         # TODO: this needs to be fixed and made cleaner later.

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1049         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1050                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051             return forward_call(*input, **kwargs)
   1052         # Do not call functions when jit is used
   1053         full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.7/site-packages/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py in forward(self, pixel_values, decoder_input_ids, decoder_attention_mask, encoder_outputs, past_key_values, decoder_inputs_embeds, labels, use_cache, output_attentions, output_hidden_states, return_dict, **kwargs)
    491             past_key_values=past_key_values,
    492             return_dict=return_dict,
--> 493             **kwargs_decoder,
    494         )
    495 

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1049         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1050                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051             return forward_call(*input, **kwargs)
   1052         # Do not call functions when jit is used
   1053         full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.7/site-packages/transformers/models/gpt2/modeling_gpt2.py in forward(self, input_ids, past_key_values, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, encoder_hidden_states, encoder_attention_mask, labels, use_cache, output_attentions, output_hidden_states, return_dict)
   1055             output_attentions=output_attentions,
   1056             output_hidden_states=output_hidden_states,
-> 1057             return_dict=return_dict,
   1058         )
   1059         hidden_states = transformer_outputs[0]

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1049         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1050                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051             return forward_call(*input, **kwargs)
   1052         # Do not call functions when jit is used
   1053         full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.7/site-packages/transformers/models/gpt2/modeling_gpt2.py in forward(self, input_ids, past_key_values, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, output_hidden_states, return_dict)
    828 
    829         if inputs_embeds is None:
--> 830             inputs_embeds = self.wte(input_ids)
    831         position_embeds = self.wpe(position_ids)
    832         hidden_states = inputs_embeds + position_embeds

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1049         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1050                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051             return forward_call(*input, **kwargs)
   1052         # Do not call functions when jit is used
   1053         full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/sparse.py in forward(self, input)
    158         return F.embedding(
    159             input, self.weight, self.padding_idx, self.max_norm,
--> 160             self.norm_type, self.scale_grad_by_freq, self.sparse)
    161 
    162     def extra_repr(self) -> str:

/opt/conda/lib/python3.7/site-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
   2041         # remove once script supports set_grad_enabled
   2042         _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 2043     return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
   2044 
   2045 

IndexError: index out of range in self

I checked my vocab size and the length of the tokenizer; they are different:

Vocab size: 50257
Tokenizer length: 50258
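
For reference, this is roughly how I compared them (sketch):

# Compare the decoder embedding size with the tokenizer length
print(model.config.decoder.vocab_size)  # 50257
print(len(tokenizer))                   # 50258 (one extra special token)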

Is that what causes the index error?


Great detective work @seanbenhur! I think that adding the BOS token is the cause of the problem, and you can resize the embedding layer as follows:

model.resize_token_embeddings(len(tokenizer))
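
In case resizing via the wrapper model is not supported in your transformers version, an equivalent sketch is to resize the GPT-2 decoder directly and keep the config in sync:

# Sketch: resize the decoder's embedding matrix to the extended tokenizer length
model.decoder.resize_token_embeddings(len(tokenizer))
model.config.decoder.vocab_size = len(tokenizer)
model.config.vocab_size = len(tokenizer)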

Does resizing the embedding layer solve the problem?


This is cool, the issue is fixed and training has started.
