When fine-tuning Llama 2, what loss function is used? For instance, here is the training snippet from the fine-tuning script in the Llama Recipes GitHub repository. What is the loss function here?
from transformers import TrainerCallback
from contextlib import nullcontext

# Toggle for torch-profiler instrumentation of the training run.
enable_profiler = False
output_dir = "tmp/llama-output"

# Run configuration. `lora_config` is a PEFT LoRA config defined elsewhere
# in the notebook/script; it is split out below because TrainingArguments
# does not accept it as a field.
config = {
    'lora_config': lora_config,
    'learning_rate': 1e-4,
    'num_train_epochs': 1,
    'gradient_accumulation_steps': 2,
    'per_device_train_batch_size': 2,
    'gradient_checkpointing': False,
}

from transformers import default_data_collator, Trainer, TrainingArguments

# Define training args
training_args = TrainingArguments(
    output_dir=output_dir,
    overwrite_output_dir=True,
    bf16=True,  # Use BF16 if available
    # logging strategies
    logging_dir=f"{output_dir}/logs",
    logging_strategy="steps",
    logging_steps=10,
    save_strategy="no",
    optim="adamw_torch_fused",
    # When profiling, cap the run at `total_steps` (defined alongside the
    # profiler setup); -1 means "no step cap", i.e. train full epochs.
    max_steps=total_steps if enable_profiler else -1,
    # Forward every config entry except the LoRA config (see note above).
    **{k: v for k, v in config.items() if k != 'lora_config'}
)

# BUG FIX: `profiler` was used below but never defined in this snippet,
# which raises NameError. When profiling is disabled, fall back to a no-op
# context manager (this is what the imported `nullcontext` is for in the
# original recipe; the profiler-enabled branch assigns a
# torch.profiler.profile instance here instead — not shown in this chunk).
if not enable_profiler:
    profiler = nullcontext()

with profiler:
    # Create Trainer instance.
    #
    # NOTE(review): no explicit loss is passed — Trainer uses the loss
    # returned by the model's forward() when `labels` are present in the
    # batch. For causal-LM models such as Llama this is token-level
    # cross-entropy over the shifted labels (computed inside the model).
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        data_collator=default_data_collator,
        callbacks=[profiler_callback] if enable_profiler else [],
    )

    # Start training
    trainer.train()