Trainer freezes after all steps are complete (multi-GPU setting)

I am running the script attached below. After a long time it finished all the steps, but there is no further output in the logs, no checkpoint has been saved, and the script still appears to be running (with 0% GPU usage).
The script worked fine on a tiny version of the dataset that I used to verify that everything was working.

python -m torch.distributed.launch --nproc-per-node=4 finetune_flan.py > log.txt 2>&1
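
For what it's worth, recent PyTorch versions deprecate torch.distributed.launch in favor of torchrun; assuming the same four GPUs, the equivalent launch should be roughly:

torchrun --nproc_per_node=4 finetune_flan.py > log.txt 2>&1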

Last output

100%|██████████| 26786/26786 [4:29:43<00:00, 1.42it/s]

# Adapted from https://github.com/philschmid/deep-learning-pytorch-huggingface/blob/main/training/flan-t5-samsum-summarization.ipynb

import os

import evaluate
import nltk
import numpy as np
from datasets import load_dataset
from nltk.tokenize import sent_tokenize
from transformers import (
    AutoModelForSeq2SeqLM,
    DataCollatorForSeq2Seq,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    T5Tokenizer,
)

os.environ["WANDB_DISABLED"] = "true"

nltk.download("punkt")

tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")

# Evaluation
metric = evaluate.load("rouge")

def postprocess_text(preds, labels):
    preds = [pred.strip() for pred in preds]
    labels = [label.strip() for label in labels]

    # rougeLSum expects newline after each sentence
    preds = ["\n".join(sent_tokenize(pred)) for pred in preds]
    labels = ["\n".join(sent_tokenize(label)) for label in labels]

    return preds, labels

def compute_metrics(eval_preds):
    preds, labels = eval_preds
    if isinstance(preds, tuple):
        preds = preds[0]
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    # Replace -100 in the labels as we can't decode them.
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

    # Some simple post-processing
    decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)

    result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
    result = {k: round(v * 100, 4) for k, v in result.items()}
    prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
    result["gen_len"] = np.mean(prediction_lens)
    return result

# Preprocessing and training
max_source_length = 512
max_target_length = 512

def preprocess_function(sample, padding="max_length"):
    # tokenize inputs
    model_inputs = tokenizer(sample["input"], max_length=max_source_length, padding=padding, truncation=True)

    # tokenize targets with the `text_target` keyword argument
    labels = tokenizer(text_target=sample["output"], max_length=max_target_length, padding=padding, truncation=True)

    # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
    # padding in the loss.
    if padding == "max_length":
        labels["input_ids"] = [
            [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
        ]

    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

def main():
    train = load_dataset("json",data_files="../data/combined_train.jsonl")

    test = load_dataset("json", data_files="../data/combined_eval.jsonl")

    tokenized_train_dataset = train.map(preprocess_function, batched=True)
    tokenized_eval_dataset = test.map(preprocess_function, batched=True)


    # we want to ignore tokenizer pad token in the loss
    label_pad_token_id = -100
    # Data collator
    data_collator = DataCollatorForSeq2Seq(
        tokenizer,
        model=model,
        label_pad_token_id=label_pad_token_id,
        pad_to_multiple_of=8,
    )

    output_loc = "../model"
    #output_loc = "/home/nlp-shared-scratch/rdivekar/conversation-gen/model"

    training_args = Seq2SeqTrainingArguments(
        output_dir=output_loc,
        per_device_train_batch_size=2,
        per_device_eval_batch_size=2,
        predict_with_generate=True,
        fp16=False, # Overflows with fp16
        bf16=True,
        learning_rate=5e-5,
        num_train_epochs=5,
        # logging & evaluation strategies
        logging_dir=f"{output_loc}/logs",
        logging_strategy="steps",
        logging_steps=500,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        save_total_limit=2,
        load_best_model_at_end=True
        # metric_for_best_model="overall_f1",
        # push to hub parameters
        # report_to="tensorboard",
        # push_to_hub=False,
        # hub_strategy="every_save",
        # hub_model_id=repository_id,
        # hub_token=HfFolder.get_token(),
    )

    # Create Trainer instance
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=tokenized_train_dataset["train"],
        eval_dataset=tokenized_eval_dataset["train"],  # load_dataset("json", ...) puts everything under a single "train" split
        compute_metrics=compute_metrics,
    )

    trainer.train()
    trainer.evaluate()
    tokenizer.save_pretrained(output_loc)


if __name__ == "__main__":
    main()

Hey, thanks for your post. I ran into the same problem using the Trainer. The only difference is that my training hangs at iteration n-1:

100%|█████████▉| 2875/2876 [11:31<00:00,  4.27it/s]

Have you solved it? Any idea how to debug?

Adding the following to your environment (e.g. your .bashrc or .zshrc) and sourcing it should solve the problem:

export NCCL_P2P_DISABLE="1"
export NCCL_IB_DISABLE="1"
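
If you don't want to change your shell config, the same variables can also be set just for a single run by prefixing them to the launch command, e.g.:

NCCL_P2P_DISABLE=1 NCCL_IB_DISABLE=1 python -m torch.distributed.launch --nproc-per-node=4 finetune_flan.py > log.txt 2>&1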


Hi, I encountered the same issue. Have you solved it?

This fixed the same issue for me. Thanks!