Hi all, could you please give me some idea of how to run the attached code on multiple GPUs, specifically GPUs 1 and 2? As I understand it, the Hugging Face Trainer always defaults to gpu:0, but I need to pin training to specific GPUs (1 and 2). @philschmid @nielsr, your help would be appreciated. My best guess at a launch command and device placement is at the end of the post; the full script follows.
import os
import torch
import pandas as pd
from datasets import Dataset, load_dataset
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
os.environ['CUDA_VISIBLE_DEVICES'] = "1,2"
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
HfArgumentParser,
TrainingArguments,
pipeline,
logging,
)
from peft import LoraConfig, PeftModel
from trl import SFTTrainer
model_name="//sentence-transformers/Llama-2-7b-hf"
# The instruction dataset to use
# dataset_name = "mlabonne/guanaco-llama2-1k"
dataset = Dataset.from_pandas(pd.read_parquet("/notebooks/output_data/data.parquet"))
# Fine-tuned model name
new_model = "llama-2-7b-miniguanaco"
################################################################################
# bitsandbytes parameters
################################################################################
# BitsAndBytesConfig int-4 config
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=bnb_config, use_cache=False, device_map={"": 0})
model.config.pretraining_tp = 1
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"
from peft import LoraConfig, prepare_model_for_kbit_training, get_peft_model
################################################################################
# QLoRA parameters
################################################################################
# LoRA config based on QLoRA paper
peft_config = LoraConfig(
lora_alpha=16,
lora_dropout=0.1,
r=64,
bias="none",
task_type="CAUSAL_LM",
)
# prepare model for training
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, peft_config)
from transformers import TrainingArguments
args = TrainingArguments(
output_dir="llama-7-int4-dolly",
num_train_epochs=1,
per_device_train_batch_size=4,  # per-GPU batch size
gradient_accumulation_steps=2,
gradient_checkpointing=True,
optim="paged_adamw_32bit",
logging_steps=10,
save_strategy="epoch",
learning_rate=2e-4,
bf16=True,
tf32=True,
max_grad_norm=0.3,
warmup_ratio=0.03,
lr_scheduler_type="constant",
disable_tqdm=True  # disable tqdm since the progress values are incorrect with packing
)
from trl import SFTTrainer
max_seq_length = 1056 # max sequence length for model and packing of the dataset
trainer = SFTTrainer(
model=model,
train_dataset=dataset,
peft_config=peft_config,
max_seq_length=max_seq_length,
tokenizer=tokenizer,
dataset_text_field="text",
packing=True,
# formatting_func=format_instruction,
args=args,
)
output_dir = "~/Llama-2-7b-hf_results/v2/"
trainer.train() # there will not be a progress bar since tqdm is disabled
# save model
trainer.save_model(output_dir)
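For reference, my current (unverified) guess is that the script has to be launched with torchrun rather than plain python, e.g. CUDA_VISIBLE_DEVICES=1,2 torchrun --nproc_per_node=2 train_llama.py (train_llama.py is just a placeholder name for this file), and that the hard-coded device_map={"": 0} should instead use the per-process LOCAL_RANK that torchrun sets, roughly like the sketch below. Is that the right direction?

# Sketch of the device placement I think is needed under torchrun/DDP --
# this is my assumption, not something I have confirmed works.
import os
local_rank = int(os.environ.get("LOCAL_RANK", 0))  # set by torchrun for each spawned process
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    use_cache=False,
    device_map={"": local_rank},  # load this process's copy on its own GPU instead of always GPU 0
)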