Runtime error: CUDA out of memory, not sure if accelerate offloading is working

Hi, I’m trying to load TheBloke/Phind-CodeLlama-34B-v2-GPTQ for inference, but from_pretrained fails with a CUDA out of memory error.
With only 15 GB of VRAM that wasn’t a surprise, but I was hoping that by offloading to RAM and disk I would be able to load it anyway.
Since I still get the error, I suspect the model isn’t being offloaded correctly.

Here’s the code:

import streamlit as st
import transformers
from accelerate import infer_auto_device_map, init_empty_weights
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer


@st.cache_resource
def initModel():
    # Build an empty (meta) model just to inspect the inferred device map
    config = AutoConfig.from_pretrained("TheBloke/Phind-CodeLlama-34B-v2-GPTQ")
    with init_empty_weights():
        model = AutoModelForCausalLM.from_config(config)

        device_map = infer_auto_device_map(model)
        print(device_map)

    print("Loading the model")
    model_name_or_path = "TheBloke/Phind-CodeLlama-34B-v2-GPTQ"
    # Load for real, letting accelerate offload whatever doesn't fit on the GPU
    model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
                                                 trust_remote_code=True,
                                                 device_map="auto",
                                                 offload_folder="offload",
                                                 offload_state_dict=True,
                                                 revision="main")
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
    print("type of model:", type(model))

    generator = transformers.pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_p=0.95,
        top_k=40,
        repetition_penalty=1.1,
    )
    return generator

Here’s the error:

OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB (GPU 0; 14.76 GiB total capacity; 14.21 GiB already allocated; 67.75 MiB free; 14.23 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
Traceback:
File "/home/user/.pyenv/versions/3.10.13/lib/python3.10/site-packages/streamlit/runtime/scriptrunner/script_runner.py", line 541, in _run_script
    exec(code, module.__dict__)
File "/home/user/app/app.py", line 189, in <module>
    generator = initModel()
File "/home/user/.pyenv/versions/3.10.13/lib/python3.10/site-packages/streamlit/runtime/caching/cache_utils.py", line 211, in wrapper
    return cached_func(*args, **kwargs)
File "/home/user/.pyenv/versions/3.10.13/lib/python3.10/site-packages/streamlit/runtime/caching/cache_utils.py", line 240, in __call__
    return self._get_or_create_cached_value(args, kwargs)
File "/home/user/.pyenv/versions/3.10.13/lib/python3.10/site-packages/streamlit/runtime/caching/cache_utils.py", line 266, in _get_or_create_cached_value
    return self._handle_cache_miss(cache, value_key, func_args, func_kwargs)
File "/home/user/.pyenv/versions/3.10.13/lib/python3.10/site-packages/streamlit/runtime/caching/cache_utils.py", line 320, in _handle_cache_miss
    computed_value = self._info.func(*func_args, **func_kwargs)
File "/home/user/app/app.py", line 158, in initModel
    model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
File "/home/user/.pyenv/versions/3.10.13/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py", line 563, in from_pretrained
    return model_class.from_pretrained(
File "/home/user/.pyenv/versions/3.10.13/lib/python3.10/site-packages/transformers/modeling_utils.py", line 3180, in from_pretrained
    ) = cls._load_pretrained_model(
File "/home/user/.pyenv/versions/3.10.13/lib/python3.10/site-packages/transformers/modeling_utils.py", line 3568, in _load_pretrained_model
    new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(
File "/home/user/.pyenv/versions/3.10.13/lib/python3.10/site-packages/transformers/modeling_utils.py", line 745, in _load_state_dict_into_meta_model
    set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)
File "/home/user/.pyenv/versions/3.10.13/lib/python3.10/site-packages/accelerate/utils/modeling.py", line 317, in set_module_tensor_to_device
    new_value = value.to(device)

I’ve also computed the device map as an extra check:

{'model.embed_tokens': 0, 'model.layers.0': 0, 'model.layers.1': 0, 'model.layers.2': 0, 'model.layers.3': 0, 'model.layers.4.self_attn': 0, 'model.layers.4.mlp.gate_proj': 0, 'model.layers.4.mlp.up_proj': 0, 'model.layers.4.mlp.down_proj': 'cpu', 'model.layers.4.mlp.act_fn': 'cpu', 'model.layers.4.input_layernorm': 'cpu', 'model.layers.4.post_attention_layernorm': 'cpu', 'model.layers.5': 'cpu', 'model.layers.6': 'cpu', 'model.layers.7': 'cpu', 'model.layers.8.self_attn': 'cpu', 'model.layers.8.mlp.gate_proj': 'cpu', 'model.layers.8.mlp.up_proj': 'cpu', 'model.layers.8.mlp.down_proj': 'disk', 'model.layers.8.mlp.act_fn': 'disk', 'model.layers.8.input_layernorm': 'disk', 'model.layers.8.post_attention_layernorm': 'disk', 'model.layers.9': 'disk', 'model.layers.10': 'disk', 'model.layers.11': 'disk', 'model.layers.12': 'disk', 'model.layers.13': 'disk', 'model.layers.14': 'disk', 'model.layers.15': 'disk', 'model.layers.16': 'disk', 'model.layers.17': 'disk', 'model.layers.18': 'disk', 'model.layers.19': 'disk', 'model.layers.20': 'disk', 'model.layers.21': 'disk', 'model.layers.22': 'disk', 'model.layers.23': 'disk', 'model.layers.24': 'disk', 'model.layers.25': 'disk', 'model.layers.26': 'disk', 'model.layers.27': 'disk', 'model.layers.28': 'disk', 'model.layers.29': 'disk', 'model.layers.30': 'disk', 'model.layers.31': 'disk', 'model.layers.32': 'disk', 'model.layers.33': 'disk', 'model.layers.34': 'disk', 'model.layers.35': 'disk', 'model.layers.36': 'disk', 'model.layers.37': 'disk', 'model.layers.38': 'disk', 'model.layers.39': 'disk', 'model.layers.40': 'disk', 'model.layers.41': 'disk', 'model.layers.42': 'disk', 'model.layers.43': 'disk', 'model.layers.44': 'disk', 'model.layers.45': 'disk', 'model.layers.46': 'disk', 'model.layers.47': 'disk', 'model.norm': 'disk', 'lm_head': 'disk'}
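
In case it helps, here is roughly what I was planning to try next to constrain the allocation explicitly. The max_memory limits are just my guesses for this hardware (leaving some GPU headroom for activations); I haven’t verified that this actually avoids the OOM:

from accelerate import infer_auto_device_map, init_empty_weights
from transformers import AutoConfig, AutoModelForCausalLM

model_name_or_path = "TheBloke/Phind-CodeLlama-34B-v2-GPTQ"

config = AutoConfig.from_pretrained(model_name_or_path)
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)

# Cap what each device may hold; the 10GiB / 12GiB values are guesses,
# chosen to leave headroom on the 15 GB GPU and 15 GB of system RAM.
device_map = infer_auto_device_map(
    empty_model,
    max_memory={0: "10GiB", "cpu": "12GiB"},
    no_split_module_classes=["LlamaDecoderLayer"],
)

model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    device_map=device_map,
    offload_folder="offload",
    offload_state_dict=True,
    trust_remote_code=True,
    revision="main",
)

(I believe the same limits can also be passed straight to from_pretrained via max_memory together with device_map="auto", but I haven’t tried that either.)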

Space Hardware:
Nvidia T4 small: 4 vCPUs, 15 GB of RAM, 15 GB of VRAM, 20 GB of persistent storage.
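
For reference, my back-of-the-envelope math (which may well be off): 34B parameters at 4-bit GPTQ is roughly 34 × 10⁹ × 0.5 bytes ≈ 17 GB of weights before scales and activations, so even fully quantized the model shouldn’t fit in 15 GB of VRAM on its own, which is why I’m counting on offloading.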

Thanks for your time :blush: