RuntimeError: Cannot re-initialize CUDA in forked subprocess

I'm trying to launch distributed training with Stable Diffusion, but CUDA gets initialized before any model or dataset is loaded, which I suspect is what causes this error. In other words, if you go to the last few lines of the code below (print("before loading tokenizer: cuda:", torch.cuda.is_available())), it prints True. Any help is much appreciated!
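
For context, here is a minimal, simplified sketch (hypothetical, not my real script) of what I believe is happening: once anything initializes CUDA in the parent process, a forked worker that then touches CUDA fails with this exact error.

import torch
import torch.multiprocessing as mp

def worker(rank):
    # Any CUDA call in a forked child raises
    # "RuntimeError: Cannot re-initialize CUDA in forked subprocess"
    # if CUDA was already initialized in the parent.
    torch.zeros(1, device=f"cuda:{rank}")

if __name__ == "__main__":
    torch.zeros(1, device="cuda")  # CUDA gets initialized in the parent here
    mp.start_processes(worker, nprocs=1, start_method="fork")  # fork -> the error above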

Part of my code as additional information:

from huggingface_hub import login
login("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")
# notebook_login()
# from google.colab import drive
# drive.mount("/content/drive",force_remount=True)

# + id="1_h0kO-VnQog" outputId="530fc822-cdad-4642-9e8b-37f9651dbd9d"
#@title Import required libraries
# # %pip install protobuf==3.20.*  # for DeepSpeed

import argparse
import itertools
import math
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
from torch.utils.data import Dataset
import torchvision
import PIL
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.hub_utils import init_git_repo, push_to_hub
from diffusers.optimization import get_scheduler
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from PIL import Image
from tqdm.auto import tqdm
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer,TrainingArguments
import kornia.augmentation as K  # augmentation
import pandas as pd
import wandb
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("--lr",help="learning rate",default=5e-6,type=int)
parser.add_argument("--epochs",default=12,type=int)
parser.add_argument("--train_unet",help="whether to train Unet or not",default=False,type=bool)
parser.add_argument("--decay",help="weight_decay",default=1e-2,type=int)
parser.add_argument("--train_text_encoder",default=True,type=bool)
parser.add_argument("--data_root",default="../book dataset",type=str)
parser.add_argument("--num_examples",default=6000,type=int,help="number of training examples")
parser.add_argument("--num_devices",default=3)
parser.add_argument("--gradient_acc_steps",default=8,type=int)
args = parser.parse_args()
def image_grid(imgs, rows, cols):
    assert len(imgs) == rows*cols

    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols*w, rows*h))
    grid_w, grid_h = grid.size
    
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i%cols*w, i//cols*h))
    return grid
    
# For reproducibility.
# Note: this redefines the set_seed imported from accelerate.utils above.
def set_seed(seed: int = 42) -> None:
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # When running on the CuDNN backend, two further options must be set
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Set a fixed value for the hash seed
    os.environ["PYTHONHASHSEED"] = str(seed)
    print(f"Random seed set as {seed}")
global_seed = 42


# + id="If5Jswe526QP"
#@markdown `pretrained_model_name_or_path` which Stable Diffusion checkpoint you want to use
#pretrained_model_name_or_path = "runwayml/stable-diffusion-v1-5" #@param {type:"string"}
pretrained_model_name_or_path ="CompVis/stable-diffusion-v1-4"
# data_root="/kaggle/input/goodreads-best-books"
# label_root="/kaggle/input/goodreads-best-book-cleaned-version"
data_root=args.data_root
label_root=args.data_root

book_cover_templates=[#the first entry is for "highly legible text"
    "A {} book cover with author {}, book title {} ",
    #repeat some prompts to give model prior knowledge about book cover styles
    "A {} book cover written by author {} with book title {} ",
#     "A {} simple book cover with author {}, book title {} ",
#     "A plain {} book cover with author {}. The book title is{} ",
#     "A {} vivid book cover with author {}, book title {} ",
    "A  {} book cover with author name:{}, book title: {}",
# #     "We are going to create a clear, {}, highly detailed book cover with author named {}, and book title is '{}'",
#     "An intricate {}, book cover including book author:{}, book title: '{}'",
#     "A detailed, {}, book cover with {} ,written by author {}",
#     "A creative, colorful {}, book cover written by {}. The book title is {}, ",
#     "A {} old-fashioned, plain book cover written by {}. The book title is {}",
#     "A simple, {}, old-fashioned book cover with author name {}, book title {} ",
#     "A simple, {}, plain book cover with author name {}, book title {} ",
    "A detailed {} book cover with author {} and book title {} "
    
]
#TODO: add more to match the number of templates
summary_placeholders=[
    ", and summary: {}",
    ', and abstract: {}',
    ",summary: {}",
    ", the book describes that {}",
    ", book discription: {}",
    ", main story: {}",
    ", the book is mainly about {}",
    ", and main story: {}",
    "and book abstract: {}",
    ", and book description: {}"
]
test_templates=[#the first entry is for "highly legible text"
    "A {} book cover with author {}, book title {} ",
    #repeat some prompts to give model prior knowledge about book cover styles
    "A {} book cover written by author {} with book title {} ",
    "A {} simple book cover with author {}, book title {} ",
    "A plain {} book cover with author {}. The book title is{} ",
    "A {} vivid book cover with author {}, book title {} ",
    "A  {} book cover with author name:{}, book title: {}",
#     "We are going to create a clear, {}, highly detailed book cover with author named {}, and book title is '{}'",
    "An intricate {}, book cover including book author:{}, book title: '{}'",
    "A detailed, {}, book cover with {} ,written by author {}",
    "A creative, colorful {}, book cover written by {}. The book title is {}, ",
    "A {} old-fashioned, plain book cover written by {}. The book title is {}",
    "A simple, {}, old-fashioned book cover with author name {}, book title {} ",
    "A simple, {}, plain book cover with author name {}, book title {} ",
    "A detailed {} book cover with author {} and book title {} "
    
]
# Pad summary_placeholders to the same length as test_templates
for _ in range(len(summary_placeholders), len(test_templates)):
    summary_placeholders += [random.choice(summary_placeholders)]
summary_placeholders = summary_placeholders[:len(test_templates)]


# + id="fcA-kMQblqUe"
#@title Training hyperparameters 
hyperparam = {
    "learning_rate": args.lr, #original: 5e-4
    "scale_lr": False,
    "epochs": args.epochs,
    "train_batch_size": 1,
    "gradient_accumulation_steps": args.gradient_acc_steps,
    "seed": global_seed,
    "weight_decay": args.decay,
    # "noise_scheduler": "DDIM",
    "pretrained_model_name_or_path": pretrained_model_name_or_path,
    "output_dir": "./model",
    "training_dataset_size":args.num_examples,
    "train_unet": args.train_unet,
    "train_text_encoder": args.train_text_encoder,
    "num_templates": len(book_cover_templates),
    "include_summary": False,#True to add book summary to prompts
    "templates" : book_cover_templates
}

# + id="xp2InXqXW8aY" outputId="32997303-8897-4243-9a83-e4b75a03272e"
#@title Load the Stable Diffusion model

print("before loading tokenizer: cuda:",torch.cuda.is_available())
tokenizer = CLIPTokenizer.from_pretrained(
    pretrained_model_name_or_path,
    subfolder="tokenizer",
    use_auth_token=True,
)

I think the fix is to move everything related to diffusers, including the import statements, inside the subprocess, but I'm not sure how to structure that.
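
Something like the sketch below is what I have in mind (rough sketch only, using torch.multiprocessing.spawn as an illustration; train here is a hypothetical wrapper around the training code above, and I'm not sure how this should be combined with accelerate):

import torch.multiprocessing as mp

def train(rank, world_size):
    # Import anything that can touch CUDA only inside the child process,
    # so the parent never initializes CUDA before the workers are created.
    import torch
    from transformers import CLIPTokenizer

    torch.cuda.set_device(rank)
    tokenizer = CLIPTokenizer.from_pretrained(
        "CompVis/stable-diffusion-v1-4", subfolder="tokenizer", use_auth_token=True
    )
    # ... load the VAE/UNet/text encoder, build the dataloaders, and run the training loop ...

if __name__ == "__main__":
    world_size = 3  # args.num_devices in my script
    mp.spawn(train, args=(world_size,), nprocs=world_size)  # uses "spawn", not "fork"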