I am using MambaForCausalLM from the mamba-hf git repository and want to fine-tune the model with LoRA from the peft library. The model trains fine without the LoRA adapter, but as soon as I attach the adapter it stops working. My LoRA training code is as follows:
import json

import torch
from datasets import load_dataset, DatasetDict
from transformers import AutoTokenizer, Trainer, TrainingArguments
from peft import LoraConfig, get_peft_model
# MambaForCausalLM / MambaConfig come from the mamba-hf repository;
# the module path below is a placeholder for wherever they live in your checkout.
from modeling_mamba import MambaForCausalLM, MambaConfig

def print_trainable_parameters(model):
    """
    Prints the number of trainable parameters in the model.
    """
    trainable_params = 0
    all_param = 0
    for _, param in model.named_parameters():
        all_param += param.numel()
        if param.requires_grad:
            trainable_params += param.numel()
    print(
        f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}"
    )
def load_data(data_path):
    return load_dataset(data_path)

def load_model_pretrained(config):
    return MambaForCausalLM.from_pretrained(config)

def load_tokenizer(path):
    return AutoTokenizer.from_pretrained(path)
def make_config(json):
    config = MambaConfig(
        vocab_size=json["vocab_size"],
        d_model=json["d_model"],
        d_conv=json["d_conv"],
        expand=json["expand"],
        conv_bias=json["conv_bias"],
        bias=json["bias"],
        n_layer=json["n_layer"],
        dt_rank=json["dt_rank"],
        pad_vocab_size_multiple=json["pad_vocab_size_multiple"],
        initializer_range=json["initializer_range"],
    )
    return config
def split_data(data):
    train_size = int(len(data) * 0.8)
    valid_size = len(data) - train_size
    ds_train = data.select(list(range(train_size)))
    ds_valid = data.select(list(range(train_size, train_size + valid_size)))
    return DatasetDict({"train": ds_train, "valid": ds_valid})
def load_model_with_LoRA(model, target_modules):
    config = LoraConfig(
        target_modules=target_modules,
        task_type="CAUSAL_LM")
    m1 = get_peft_model(model, config)
    m1.print_trainable_parameters()
    m1.save_pretrained("./wts/adapter")
    return m1
def tokenize(data):
    outputs = tokenizer(
        data["tgt"],
        truncation=True,
        max_length=1024,
        return_overflowing_tokens=True,
        return_length=True,
    )
    input_batch = []
    for length, input_ids in zip(outputs["length"], outputs["input_ids"]):
        if length != 0:
            input_batch.append(input_ids)
    return {"input_ids": input_batch}
class MambaTrainer(Trainer):
    def compute_loss(self, model, inputs, return_outputs=False):
        input_ids = inputs.pop("input_ids")
        print(input_ids.shape)
        lm_logits = model(input_ids)[0]
        labels = input_ids.to(lm_logits.device)
        shift_logits = lm_logits[:, :-1, :].contiguous()
        labels = labels[:, 1:].contiguous()
        loss_fct = torch.nn.CrossEntropyLoss()
        lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1))
        return lm_loss
def train_lora(json, train_args):
    config_data = load_json(json)
    model_config = make_config(config_data)
    model = load_model_pretrained(config_data["model_path"])
    print(model)
    model = load_model_with_LoRA(model, [config_data["target_modules"]])
    tok = load_tokenizer(config_data["tokenizer_path"])
    data = load_data(config_data["data"])
    data = split_data(data)
    tokenized_data = data.map(tokenize, batched=True, remove_columns=data["train"].column_names)
    trainer = MambaTrainer(model=model, tokenizer=tok, args=train_args,
                           train_dataset=tokenized_data["train"], eval_dataset=tokenized_data["valid"])
    trainer.train()
data = {
    "model_path": "Q-bert/Mamba-130M",
    "tokenizer_path": "google/byt5-large",
    "target_modules": "x_proj",
    "adapter_path": "mlsquare/exp_model_adapter",
    "data": "mlsquare/samantar1per_cent_merged_with_train_val"
}

# Convert the dictionary to JSON format
json_data = json.dumps(data, indent=4)

# Save the JSON data to a file
with open("model_parameters_lora.json", "w") as json_file:
    json_file.write(json_data)
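For completeness, the load_json helper called in train_lora is not shown above; it is just a small wrapper that reads this file back into a dict, along these lines:

def load_json(path):
    # Read the JSON config written above back into a Python dict
    with open(path, "r") as f:
        return json.load(f)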
I want to understand why my model is not training. I get this error:
TypeError: Caught TypeError in replica 0 on device 0.
Original Traceback (most recent call last):
  File "/home/yashc/.local/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py", line 64, in _worker
    output = module(*input, **kwargs)
  File "/home/yashc/.local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/yashc/.local/lib/python3.10/site-packages/peft/peft_model.py", line 1083, in forward
    return self.base_model(
  File "/home/yashc/.local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/yashc/.local/lib/python3.10/site-packages/peft/tuners/tuners_utils.py", line 161, in forward
    return self.model.forward(*args, **kwargs)
TypeError: MambaForCausalLM.forward() got an unexpected keyword argument 'attention_mask'
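Reading the traceback, the unexpected attention_mask keyword reaches MambaForCausalLM.forward through the peft wrapper (peft_model.py -> tuners_utils.py), not through my compute_loss, which only passes input_ids. A minimal sketch that tries to isolate that call path outside the Trainer (imports and paths are the same placeholders as above; I have not verified it reproduces the exact error) would be:

import torch
from peft import LoraConfig, get_peft_model
from modeling_mamba import MambaForCausalLM  # placeholder import path, as above

base = MambaForCausalLM.from_pretrained("Q-bert/Mamba-130M")
lora = get_peft_model(base, LoraConfig(target_modules=["x_proj"], task_type="CAUSAL_LM"))

dummy_ids = torch.randint(0, 255, (1, 16))
# The bare model accepts plain input_ids:
base(dummy_ids)
# Expected to hit the same TypeError, since (per the traceback) the peft
# wrapper forwards an attention_mask keyword on to MambaForCausalLM.forward:
lora(dummy_ids)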
My training code is as follows:
train_args = TrainingArguments(
    output_dir="mamba",
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    evaluation_strategy="steps",
    num_train_epochs=4,
    weight_decay=0.1,
    warmup_steps=1_000,
    lr_scheduler_type="cosine",
    learning_rate=5e-4,
    save_steps=5_000,
    fp16=False,
)
tokenizer = AutoTokenizer.from_pretrained("google/byt5-large")
train_lora("model_parameters_lora.json", train_args)