I tried to train Qwen2.5-3B with GRPO on the LIMO dataset, using code adapted from the Colab example: https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-GRPO.ipynb
However, I cannot scale the context length above 1.5k without hitting OOM errors, which is unexpected given that the blog (https://unsloth.ai/blog/grpo) reports 54.3 GB of VRAM for Llama 3.1 8B.
The revised code collects VRAM usage messages, and they show that the VRAM allocated by PyTorch stays far below 48 GB.
The `RuntimeError: CUDA error: out of memory` does not reveal which allocation triggers it; it only produces a confusing stack trace pointing to a non-CUDA operation, shown in the debugging messages at the end of this description.
I am not familiar with PyTorch's multi-worker mechanism; could you please point out what I have done wrong?
I would appreciate any response. Thanks.
The code snippet is pasted below:
```python
from unsloth import FastLanguageModel
import torch
import gc
from functools import wraps
import time
from UnslothGRPOTrainer import (
    UnslothGRPOConfig as GRPOConfig,
    UnslothGRPOTrainer as GRPOTrainer,
)
from transformers import TrainerCallback
import numpy as np
import re
from datasets import load_dataset, Dataset
# Load and prep dataset
SYSTEM_PROMPT = """
Respond in the following format:
<reasoning>
...
</reasoning>
<answer>
...
</answer>
"""
XML_COT_FORMAT = """\
<reasoning>
{reasoning}
</reasoning>
<answer>
{answer}
</answer>
"""
def extract_hash_answer(text: str) -> str | None:
if "####" not in text:
return None
return text.split("####")[1].strip()
def track_memory(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        print(f"\n=== Entering {func.__name__} ===")
        print(f"Memory before: {torch.cuda.memory_allocated() / 1024**3:.2f} GB")
        print(f"Memory reserved before: {torch.cuda.memory_reserved() / 1024**3:.2f} GB")
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print(f"Time taken: {end_time - start_time:.2f} seconds")
        print(f"Memory after: {torch.cuda.memory_allocated() / 1024**3:.2f} GB")
        print(f"Memory reserved after: {torch.cuda.memory_reserved() / 1024**3:.2f} GB")
        print(f"=== Exiting {func.__name__} ===\n")
        return result
    return wrapper
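# Illustrative addition (an assumption of mine, not part of the original run):
# the prints above only sample memory at call entry/exit, so a transient peak
# inside a call can be much higher. A peak read-out like this would capture it.
def print_peak_memory(tag: str = "") -> None:
    print(f"Peak allocated {tag}: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
    print(f"Peak reserved {tag}: {torch.cuda.max_memory_reserved() / 1024**3:.2f} GB")
    torch.cuda.reset_peak_memory_stats()  # start a fresh peak window for the next call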
def get_gsm8k_questions(split = "train") -> Dataset:
    # data = load_dataset('openai/gsm8k', 'main')[split] # type: ignore
    data = load_dataset('GAIR/LIMO')[split] # type: ignore
    data = data.map(lambda x: { # type: ignore
        'prompt': [
            {'role': 'system', 'content': SYSTEM_PROMPT},
            {'role': 'user', 'content': x['question']}
        ],
        # 'answer': extract_hash_answer(x['answer'])
        'answer': x['answer']
    }) # type: ignore
    return data # type: ignore
@track_memory
def load_dataset_with_tracking():
    return get_gsm8k_questions()
dataset = load_dataset_with_tracking()
max_seq_length = 4096 * 2 # Can increase for longer reasoning traces
lora_rank = 32 # Larger rank = smarter, but slower
max_prompt_length = 512 # Moved here
model, tokenizer = FastLanguageModel.from_pretrained(
model_name = "Qwen/Qwen2.5-3B-Instruct",
max_seq_length = max_seq_length,
load_in_4bit = True, # False for LoRA 16bit
fast_inference = True, # Enable vLLM fast inference
max_lora_rank = lora_rank,
gpu_memory_utilization = 0.10, # Reduce if out of memory
float8_kv_cache=True
)
model = FastLanguageModel.get_peft_model(
    model,
    r = lora_rank, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
    target_modules = [
        # "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ], # Remove QKVO if out of memory
    lora_alpha = lora_rank,
    # use_gradient_checkpointing=False,
    use_gradient_checkpointing = "unsloth", # Enable long context finetuning
    random_state = 3407,
    # float8_kv_cache=True
)
def extract_xml_answer(text: str) -> str:
answer = text.split("<answer>")[-1]
answer = answer.split("</answer>")[0]
return answer.strip()
# uncomment middle messages for 1-shot prompting
# Modify training configuration
training_args = GRPOConfig(
    learning_rate = 5e-6,
    adam_beta1 = 0.9,
    adam_beta2 = 0.99,
    weight_decay = 0.1,
    warmup_ratio = 0.1,
    lr_scheduler_type = "cosine",
    optim = "paged_adamw_8bit",
    logging_steps = 1,
    per_device_eval_batch_size = 2,
    per_device_train_batch_size = 1,
    gradient_accumulation_steps = 1, # Kept at 1 while debugging memory
    num_generations = 4, # Reduced to help identify memory issues
    max_prompt_length = max_prompt_length,
    max_completion_length = max_seq_length - max_prompt_length,
    max_steps = 1000000, # Effectively run until interrupted
    save_steps = 50,
    max_grad_norm = 0.1,
    report_to = "none",
    output_dir = "outputs",
)
# Add memory tracking before model initialization
print("\n=== Model Initialization Memory Usage ===")
print(f"Memory before model init: {torch.cuda.memory_allocated() / 1024**3:.2f} GB")
print(f"Memory reserved before model init: {torch.cuda.memory_reserved() / 1024**3:.2f} GB")
# Add memory tracking for dataset loading
# Reward functions
def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:
    responses = [completion[0]['content'] for completion in completions]
    q = prompts[0][-1]['content']
    extracted_responses = [extract_xml_answer(r) for r in responses]
    print('-' * 20, f"Question:\n{q}", f"\nAnswer:\n{answer[0]}", f"\nResponse:\n{responses[0]}", f"\nExtracted:\n{extracted_responses[0]}")
    return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]
def int_reward_func(completions, **kwargs) -> list[float]:
    responses = [completion[0]['content'] for completion in completions]
    extracted_responses = [extract_xml_answer(r) for r in responses]
    return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]
def strict_format_reward_func(completions, **kwargs) -> list[float]:
"""Reward function that checks if the completion has a specific format."""
pattern = r"^<reasoning>\n.*?\n</reasoning>\n<answer>\n.*?\n</answer>\n$"
responses = [completion[0]["content"] for completion in completions]
matches = [re.match(pattern, r) for r in responses]
return [0.5 if match else 0.0 for match in matches]
def soft_format_reward_func(completions, **kwargs) -> list[float]:
"""Reward function that checks if the completion has a specific format."""
pattern = r"<reasoning>.*?</reasoning>\s*<answer>.*?</answer>"
responses = [completion[0]["content"] for completion in completions]
matches = [re.match(pattern, r) for r in responses]
return [0.5 if match else 0.0 for match in matches]
def count_xml(text) -> float:
    count = 0.0
    if text.count("<reasoning>\n") == 1:
        count += 0.125
    if text.count("\n</reasoning>\n") == 1:
        count += 0.125
    if text.count("\n<answer>\n") == 1:
        count += 0.125
        count -= len(text.split("\n</answer>\n")[-1]) * 0.001
    if text.count("\n</answer>") == 1:
        count += 0.125
        count -= (len(text.split("\n</answer>")[-1]) - 1) * 0.001
    return count
def xmlcount_reward_func(completions, **kwargs) -> list[float]:
    contents = [completion[0]["content"] for completion in completions]
    return [count_xml(c) for c in contents]
# Add memory tracking to key functions
@track_memory
def compute_loss(self, *args, **kwargs):
    return super().compute_loss(*args, **kwargs)
# Modify the trainer class to include memory tracking
class MemoryTrackedGRPOTrainer(GRPOTrainer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.compute_loss = track_memory(self.compute_loss)
# Replace the trainer instantiation with the memory tracked version
trainer = MemoryTrackedGRPOTrainer(
    model = model,
    processing_class = tokenizer,
    reward_funcs = [
        xmlcount_reward_func,
        soft_format_reward_func,
        strict_format_reward_func,
        int_reward_func,
        correctness_reward_func,
    ],
    args = training_args,
    train_dataset = dataset,
)
# Add memory tracking to the training loop
@track_memory
def train_with_memory_tracking():
    trainer.train()
# Replace trainer.train() with the tracked version
train_with_memory_tracking()
text = tokenizer.apply_chat_template([
{"role" : "user", "content" : "Which is bigger? 9.11 or 9.9?"},
], tokenize = False, add_generation_prompt = True)
```
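For reference, here is a minimal sketch of how I plan to rerun the failing step with synchronous kernel launches and an allocator snapshot, so the stack trace points at the real allocation. The environment settings, the dummy workload, and the snapshot path are only illustrative; they did not produce the log below.

```python
import os

# Must be set before CUDA is initialized in this process.
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"                            # synchronous launches, accurate stack traces
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"  # may reduce allocator fragmentation

import torch

torch.cuda.memory._record_memory_history(max_entries=100_000)  # record allocation call stacks

try:
    # Placeholder for the real workload (trainer.train() in the script above).
    x = torch.randn(1024, 1024, device="cuda")
finally:
    # Inspect the snapshot at https://pytorch.org/memory_viz
    torch.cuda.memory._dump_snapshot("grpo_oom_snapshot.pickle")
```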
The debugging messages:
```
=== Model Initialization Memory Usage ===
Memory before model init: 3.93 GB
Memory reserved before model init: 4.02 GB
=== Entering train_with_memory_tracking ===
Memory before: 3.93 GB
Memory reserved before: 4.02 GB
0%| | 0/1000000 [00:00<?, ?it/s]
0%| | 1/1000000 [00:14<4033:48:31, 14.52s/it]
0%| | 1/1000000 [00:14<4033:48:31, 14.52s/it][rank0]: Traceback (most recent call last):
[rank0]: File "<frozen runpy>", line 198, in _run_module_as_main
[rank0]: File "<frozen runpy>", line 88, in _run_code
[rank0]: File "/home/liangchen/ts_agent/train_phi4.py", line 236, in <module>
[rank0]: train_with_memory_tracking()
[rank0]: File "/home/liangchen/ts_agent/train_phi4.py", line 48, in wrapper
[rank0]: result = func(*args, **kwargs)
...
[rank0]: File "/home/liangchen/miniconda3/envs/ts_agent/lib/python3.11/site-packages/unsloth_zoo/gradient_checkpointing.py", line 767, in unsloth_checkpoint
[rank0]: return UnslothCheckpointFunction.apply(function, preserve, *args)
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank0]: File "/home/liangchen/miniconda3/envs/ts_agent/lib/python3.11/site-packages/torch/autograd/function.py", line 575, in apply
[rank0]: return super().apply(*args, **kwargs) # type: ignore[misc]
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank0]: File "/home/liangchen/miniconda3/envs/ts_agent/lib/python3.11/site-packages/unsloth_zoo/gradient_checkpointing.py", line 441, in forward
[rank0]: if new_size > x.numel(): x.resize_(new_size)
[rank0]: ^^^^^^^^^^^^^^^^^^^
[rank0]: RuntimeError: CUDA error: out of memory
[rank0]: CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
[rank0]: For debugging consider passing CUDA_LAUNCH_BLOCKING=1
[rank0]: Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.
-------------------- Question:
Triangle $ABC$ has $AB=21$ ,
...
=== Entering compute_loss ===
Memory before: 4.05 GB
Memory reserved before: 4.97 GB
input_ids.shape:torch.Size([4, 1040])
[EOF]
```