Hey, so I’m very new to Hugging Face and LLMs in general. I have fine-tuned an LLM and successfully pushed it to Hugging Face. I am now following this tutorial on downloading and converting the model. It looks like it should be pretty straightforward, but I’m running into some issues.
I think the issue is related to how the model is actually being saved? But since I was able to successfully run, save, load, and rerun the model, I’d expect those steps to be fine. I was also able to push to and download from HF without errors, and I assumed there’s some kind of validation involved in that.
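For what it’s worth, here’s a rough sanity check I was thinking of for the save step (just a sketch; it assumes the same new_model folder used in the code below, and with a LoRA/PEFT model I’d only expect adapter files there, not full weights):

import os

save_dir = "llama-2-7b-miniguanaco"  # same folder as new_model in the code below
# List whatever trainer.model.save_pretrained() / tokenizer.save_pretrained() wrote
for f in sorted(os.listdir(save_dir)):
    print(f, os.path.getsize(os.path.join(save_dir, f)), "bytes")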
Here’s my code for fine-tuning and saving the model. Most of it is from this tutorial, but I did customize it some.
!pip install -q accelerate==0.21.0 peft==0.4.0 bitsandbytes==0.40.2 transformers==4.31.0 trl==0.4.7
import os
import torch
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    HfArgumentParser,
    TrainingArguments,
    pipeline,
    logging,
)
from peft import LoraConfig, PeftModel
from trl import SFTTrainer
from datasets import Dataset
with open('drive/MyDrive/LLM/classified_collaterals.txt', 'r') as file:
    lines = file.readlines()

data = [
    [l.replace("'", '').strip() for l in
     line.replace('[', '').replace(']', '').replace('"', '').split("', '")]
    for line in lines if line.strip()
]
data = [d for d in data if len(d) == 2]
formatted_strings = [f"<s>[INST] {item[0]} [/INST] {item[1]} </s>" for item in data]
# Create a dictionary from your list
data_dict = {"text": formatted_strings}
# Create a dataset from the dictionary
dataset = Dataset.from_dict(data_dict)
# The model that you want to train from the Hugging Face hub
model_name = "NousResearch/llama-2-7b-chat-hf"
# The instruction dataset from the tutorial (not actually used here; I build my own dataset above)
dataset_name = "mlabonne/guanaco-llama2-1k"
# Fine-tuned model name
new_model = "llama-2-7b-miniguanaco"
################################################################################
# QLoRA parameters
################################################################################
# LoRA attention dimension
lora_r = 64
# Alpha parameter for LoRA scaling
lora_alpha = 16
# Dropout probability for LoRA layers
lora_dropout = 0.1
################################################################################
# bitsandbytes parameters
################################################################################
# Activate 4-bit precision base model loading
use_4bit = True
# Compute dtype for 4-bit base models
bnb_4bit_compute_dtype = "float16"
# Quantization type (fp4 or nf4)
bnb_4bit_quant_type = "nf4"
# Activate nested quantization for 4-bit base models (double quantization)
use_nested_quant = False
################################################################################
# TrainingArguments parameters
################################################################################
# Output directory where the model predictions and checkpoints will be stored
output_dir = "./results"
# Number of training epochs
num_train_epochs = 1
# Enable fp16/bf16 training (set bf16 to True with an A100)
fp16 = False
bf16 = False
# Batch size per GPU for training
per_device_train_batch_size = 4
# Batch size per GPU for evaluation
per_device_eval_batch_size = 4
# Number of update steps to accumulate the gradients for
gradient_accumulation_steps = 1
# Enable gradient checkpointing
gradient_checkpointing = True
# Maximum gradient norm (gradient clipping)
max_grad_norm = 0.3
# Initial learning rate (AdamW optimizer)
learning_rate = 2e-4
# Weight decay to apply to all layers except bias/LayerNorm weights
weight_decay = 0.001
# Optimizer to use
optim = "paged_adamw_32bit"
# Learning rate schedule (constant is a bit better than cosine)
lr_scheduler_type = "constant"
# Number of training steps (overrides num_train_epochs)
max_steps = -1
# Ratio of steps for a linear warmup (from 0 to learning rate)
warmup_ratio = 0.03
# Group sequences into batches with same length
# Saves memory and speeds up training considerably
group_by_length = True
# Save a checkpoint every X update steps
save_steps = 25
# Log every X update steps
logging_steps = 25
################################################################################
# SFT parameters
################################################################################
# Maximum sequence length to use
max_seq_length = None
# Pack multiple short examples in the same input sequence to increase efficiency
packing = False
# Load the entire model on GPU 0
device_map = {"": 0}
# Load tokenizer and model with QLoRA configuration
compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=use_4bit,
    bnb_4bit_quant_type=bnb_4bit_quant_type,
    bnb_4bit_compute_dtype=compute_dtype,
    bnb_4bit_use_double_quant=use_nested_quant,
)
# Check GPU compatibility with bfloat16
if compute_dtype == torch.float16 and use_4bit:
    major, _ = torch.cuda.get_device_capability()
    if major >= 8:
        print("=" * 80)
        print("Your GPU supports bfloat16: accelerate training with bf16=True")
        print("=" * 80)
# Load base model
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map={"": 1}
)
model.config.use_cache = False
model.config.pretraining_tp = 1
# Load LLaMA tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right" # Fix weird overflow issue with fp16 training
# Load LoRA configuration
peft_config = LoraConfig(
    lora_alpha=lora_alpha,
    lora_dropout=lora_dropout,
    r=lora_r,
    bias="none",
    task_type="CAUSAL_LM",
)
# Set training parameters
training_arguments = TrainingArguments(
    output_dir=output_dir,
    num_train_epochs=num_train_epochs,
    per_device_train_batch_size=per_device_train_batch_size,
    gradient_accumulation_steps=gradient_accumulation_steps,
    optim=optim,
    save_steps=save_steps,
    logging_steps=logging_steps,
    learning_rate=learning_rate,
    weight_decay=weight_decay,
    fp16=fp16,
    bf16=bf16,
    max_grad_norm=max_grad_norm,
    max_steps=max_steps,
    warmup_ratio=warmup_ratio,
    group_by_length=group_by_length,
    lr_scheduler_type=lr_scheduler_type,
    report_to="tensorboard"
)
# Set supervised fine-tuning parameters
trainer = SFTTrainer(
    model=model,
    train_dataset=dataset,
    peft_config=peft_config,
    dataset_text_field="text",
    max_seq_length=max_seq_length,
    tokenizer=tokenizer,
    args=training_arguments,
    packing=packing,
)
# Train model
trainer.train()
# Save trained model
trainer.model.save_pretrained(new_model)
tokenizer.save_pretrained(new_model)
# Run a text generation pipeline with the new model
prompt = "The machines, equipment, furniture &fixtures, plus any accessions to replacements of, and proceeds from the sale of these (1) Bally machines & accessory equipment."
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer)
result = pipe(f"[INST] {prompt} [/INST]")
print(result[0]['generated_text'])
It trains and runs fine (performance isn’t very good, but that’s a different story lol). I’m then able to reload the model, merge the LoRA weights, and push to HF.
# Reload model in FP16 and merge it with LoRA weights
base_model = AutoModelForCausalLM.from_pretrained(
    model_name,
    low_cpu_mem_usage=True,
    return_dict=True,
    torch_dtype=torch.float16,
    device_map="cpu",
)
model = PeftModel.from_pretrained(base_model, f'/content/drive/MyDrive/LLM/{new_model}')
model = model.merge_and_unload()
# Reload tokenizer to save it
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"
!huggingface-cli login
model.push_to_hub('kaylub/testing')
tokenizer.push_to_hub('kaylub/testing')
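To double-check the push, something like this should list what actually ended up in the repo (sketch using huggingface_hub):

from huggingface_hub import HfApi

# List the files push_to_hub uploaded to kaylub/testing
api = HfApi()
print(api.list_repo_files("kaylub/testing"))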
This also works fine, and I can verify the files are in the repo. I then try to download the model and convert it to GGUF.
from huggingface_hub import snapshot_download
model_id="kaylub/testing"
snapshot_download(repo_id=model_id, local_dir="testing",
                  local_dir_use_symlinks=False, revision="main")
!ls -lash testing
total 13G
4.0K drwxr-xr-x 2 root root 4.0K Jan 26 15:18 .
4.0K drwxr-xr-x 1 root root 4.0K Jan 26 15:18 ..
4.0K -rw-r--r-- 1 root root 632 Jan 26 15:16 config.json
4.0K -rw-r--r-- 1 root root 174 Jan 26 15:16 generation_config.json
4.0K -rw-r--r-- 1 root root 1.5K Jan 26 15:16 .gitattributes
9.3G -rw-r--r-- 1 root root 9.3G Jan 26 15:18 pytorch_model-00001-of-00002.bin
3.3G -rw-r--r-- 1 root root 3.3G Jan 26 15:17 pytorch_model-00002-of-00002.bin
28K -rw-r--r-- 1 root root 27K Jan 26 15:16 pytorch_model.bin.index.json
4.0K -rw-r--r-- 1 root root 434 Jan 26 15:16 special_tokens_map.json
4.0K -rw-r--r-- 1 root root 695 Jan 26 15:16 tokenizer_config.json
1.8M -rw-r--r-- 1 root root 1.8M Jan 26 15:16 tokenizer.json
!git clone https://github.com/ggerganov/llama.cpp.git
!pip install -r llama.cpp/requirements.txt
!python llama.cpp/convert.py -h
Then I try to run the following to convert the file and I get an error:
!python llama.cpp/convert.py testing \
--outfile testing.gguf \
--outtype q8_0
Loading model file testing/pytorch_model-00001-of-00002.bin
Loading model file testing/pytorch_model-00001-of-00002.bin
Loading model file testing/pytorch_model-00002-of-00002.bin
params = Params(n_vocab=32000, n_embd=4096, n_layer=32, n_ctx=4096, n_ff=11008, n_head=32, n_head_kv=32, n_experts=None, n_experts_used=None, f_norm_eps=1e-05, rope_scaling_type=None, f_rope_freq_base=None, f_rope_scale=None, n_orig_ctx=None, rope_finetuned=None, ftype=<GGMLFileType.MostlyQ8_0: 7>, path_model=PosixPath('testing'))
Found vocab files: {'tokenizer.model': None, 'vocab.json': None, 'tokenizer.json': PosixPath('testing/tokenizer.json')}
Loading vocab file 'testing/tokenizer.json', type 'spm'
Traceback (most recent call last):
  File "/content/llama.cpp/convert.py", line 1471, in <module>
    main()
  File "/content/llama.cpp/convert.py", line 1439, in main
    vocab, special_vocab = vocab_factory.load_vocab(args.vocab_type, model_parent_path)
  File "/content/llama.cpp/convert.py", line 1325, in load_vocab
    vocab = SentencePieceVocab(
  File "/content/llama.cpp/convert.py", line 391, in __init__
    self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
  File "/usr/local/lib/python3.10/dist-packages/sentencepiece/__init__.py", line 447, in Init
    self.Load(model_file=model_file, model_proto=model_proto)
  File "/usr/local/lib/python3.10/dist-packages/sentencepiece/__init__.py", line 905, in Load
    return self.LoadFromFile(model_file)
  File "/usr/local/lib/python3.10/dist-packages/sentencepiece/__init__.py", line 310, in LoadFromFile
    return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)
RuntimeError: Internal: src/sentencepiece_processor.cc(1101) [model_proto->ParseFromArray(serialized.data(), serialized.size())]
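My guess is that convert.py is looking for the SentencePiece tokenizer.model, which isn’t in my repo (the ls output above only shows tokenizer.json), so it falls back to parsing tokenizer.json as an spm model and blows up. Something like this is what I was considering to check/work around it before re-running the conversion (untested, and it assumes the base repo ships a tokenizer.model):

import os, shutil
from huggingface_hub import hf_hub_download

# Untested idea: grab the SentencePiece tokenizer.model from the base model repo
# and drop it next to the merged weights, since my pushed repo only has tokenizer.json.
if not os.path.exists("testing/tokenizer.model"):
    path = hf_hub_download(
        repo_id="NousResearch/llama-2-7b-chat-hf",  # base model; assumes it includes tokenizer.model
        filename="tokenizer.model",
    )
    shutil.copy(path, "testing/tokenizer.model")

Not sure if that’s the right direction, though.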
System Info:
- huggingface_hub version: 0.20.3
- Platform: Linux-6.1.58+-x86_64-with-glibc2.35
- Python version: 3.10.12
- Running in iPython ?: Yes
- iPython shell: Shell
- Running in notebook ?: Yes
- Running in Google Colab ?: Yes
- Token path ?: /root/.cache/huggingface/token
- Has saved token ?: True
- Who am I ?: kaylub
- Configured git credential helpers:
- FastAI: 2.7.13
- Tensorflow: 2.15.0
- Torch: 2.1.0+cu121
- Jinja2: 3.1.3
- Graphviz: 0.20.1
- Pydot: 1.4.2
- Pillow: 9.4.0
- hf_transfer: N/A
- gradio: N/A
- tensorboard: N/A
- numpy: 1.23.5
- pydantic: 1.10.14
- aiohttp: 3.9.1
- ENDPOINT: https://huggingface.co
- HF_HUB_CACHE: /root/.cache/huggingface/hub
- HF_ASSETS_CACHE: /root/.cache/huggingface/assets
- HF_TOKEN_PATH: /root/.cache/huggingface/token
- HF_HUB_OFFLINE: False
- HF_HUB_DISABLE_TELEMETRY: False
- HF_HUB_DISABLE_PROGRESS_BARS: None
- HF_HUB_DISABLE_SYMLINKS_WARNING: False
- HF_HUB_DISABLE_EXPERIMENTAL_WARNING: False
- HF_HUB_DISABLE_IMPLICIT_TOKEN: False
- HF_HUB_ENABLE_HF_TRANSFER: False
- HF_HUB_ETAG_TIMEOUT: 10
- HF_HUB_DOWNLOAD_TIMEOUT: 10