Fine-tuning Llama 2 with multiple GPUs and the Hugging Face Trainer

Hi all, @phucdoitoan, I am using the code below, but my issue is that I need to run it on multiple GPUs, for example on GPUs 1, 2 and 3 (not GPU 0). Could you please help me understand how to change the code, or which extra lines to add, so that it runs on multiple GPUs? For me, the Hugging Face Trainer always needs GPU 0 to be free, even when I select GPUs 1 and 2. Any help would be appreciated.
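For example, my understanding is that one way to keep GPU 0 free would be to hide it with CUDA_VISIBLE_DEVICES before torch is initialized, so that only GPUs 1, 2 and 3 are visible to the process. Something like the sketch below, although I am not sure whether this is the right way to do it with the Trainer:

import os
# Hide physical GPU 0; only GPUs 1, 2 and 3 are visible to this process.
# This has to be set before torch / CUDA is initialized.
os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3"

import torch
print(torch.cuda.device_count())  # 3 -- cuda:0 now maps to physical GPU 1

With this, device_map={"": 0} in the script below would place the model on physical GPU 1, but I still don't understand how to get the Trainer to use all three GPUs together. Here is the full script: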


import os
import torch
import pandas as pd
from datasets import load_dataset

from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    HfArgumentParser,
    TrainingArguments,
    pipeline,
    logging,
)
from peft import LoraConfig, PeftModel
from trl import SFTTrainer

model_name="//sentence-transformers/Llama-2-7b-hf"

# The instruction dataset to use
# dataset_name = "mlabonne/guanaco-llama2-1k"
dataset = load_dataset("parquet", data_files="/notebooks/output_data/data.parquet", split="train")

# Fine-tuned model name
new_model = "llama-2-7b-miniguanaco"


################################################################################
# bitsandbytes parameters
################################################################################

# BitsAndBytesConfig int-4 config
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
    model_name, quantization_config=bnb_config, use_cache=False,
    device_map={"": 0},  # places the whole model on GPU 0
)
model.config.pretraining_tp = 1

tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"

from peft import LoraConfig, prepare_model_for_kbit_training, get_peft_model
################################################################################
# QLoRA parameters
################################################################################
# LoRA config based on QLoRA paper
peft_config = LoraConfig(
    lora_alpha=16,
    lora_dropout=0.1,
    r=64,
    bias="none",
    task_type="CAUSAL_LM",
)


# prepare model for training
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, peft_config)


from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="llama-7-int4-dolly",
    num_train_epochs=1,
    per_device_train_batch_size=4,  # would be 6 with flash attention, which is not enabled here
    gradient_accumulation_steps=2,
    gradient_checkpointing=True,
    optim="paged_adamw_32bit",
    logging_steps=10,
    save_strategy="epoch",
    learning_rate=2e-4,
    bf16=True,
    tf32=True,
    max_grad_norm=0.3,
    warmup_ratio=0.03,
    lr_scheduler_type="constant",
    disable_tqdm=True # disable tqdm since with packing values are incorrect
)


from trl import SFTTrainer

max_seq_length = 1056 # max sequence length for model and packing of the dataset

trainer = SFTTrainer(
    model=model,
    train_dataset=dataset,
    peft_config=peft_config,
    max_seq_length=max_seq_length,
    tokenizer=tokenizer,
    dataset_text_field="text",
    packing=True,
    # formatting_func=format_instruction,
    args=args,
)

output_dir = os.path.expanduser("~/Llama-2-7b-hf_results/v2/")  # expand "~" so the directory is created in the home folder

trainer.train() # there will not be a progress bar since tqdm is disabled
# save model
trainer.save_model(output_dir)

I am facing the same situation. Did you get an answer?