Hi, I'm trying to run the PEFT script from the examples folder of AutoGPTQ, but I get the following error:
ValueError: Target modules [] not found in the base model. Please check the target modules and try again.
The model I'm using is Llama 2 7B, which I previously quantized (4-bit, group size 128) with the auto_gptq library. I also tried passing the module names explicitly via the target_modules argument of GPTQLoraConfig, but that didn't work either. The attempt looked roughly like this (a sketch from memory; the module names are the standard Llama attention projections, not copied verbatim from my script):
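```python
# Sketch of the explicit attempt (hypothetical values; q_proj/k_proj/
# v_proj/o_proj are the standard Llama attention projection names):
peft_config = GPTQLoraConfig(
    r=8,
    lora_alpha=32,
    lora_dropout=0.1,
    task_type=TaskType.CAUSAL_LM,
    inference_mode=False,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
)
```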
Any ideas how to solve it?
Thanks!
Code:
```python
import json
import os
from functools import partial

import torch
from datasets import Dataset
from peft import TaskType
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer, get_linear_schedule_with_warmup

from auto_gptq import AutoGPTQForCausalLM, get_gptq_peft_model
from auto_gptq.utils.data_utils import collate_data, make_data_block
from auto_gptq.utils.peft_utils import GPTQLoraConfig

os.environ["TOKENIZERS_PARALLELISM"] = "false"

model_name_or_path = r'/home/ubuntu/...'
tokenizer_name_or_path = r'/home/ubuntu/...'
lr = 3e-5
num_epochs = 1

peft_config = GPTQLoraConfig(
    r=8,
    lora_alpha=32,
    lora_dropout=0.1,
    task_type=TaskType.CAUSAL_LM,
    inference_mode=False,
    auto_mapping=True,
)

tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True)
if not tokenizer.pad_token_id:
    tokenizer.pad_token_id = tokenizer.eos_token_id

# Load the GPTQ-quantized model with the Triton backend, since training
# (trainable=True) requires the Triton QuantLinear kernels.
model = AutoGPTQForCausalLM.from_quantized(
    model_name_or_path,
    use_triton=True,
    warmup_triton=False,
    trainable=True,
    inject_fused_attention=False,
    inject_fused_mlp=False,
    disable_exllama=True,
    disable_exllamav2=True,
)
model.warmup_triton()
device = model.device

# Wrap the quantized model with a LoRA adapter; auto_find_all_linears=True
# should discover the quantized linear layers automatically.
model = get_gptq_peft_model(
    model=model,
    peft_config=peft_config,
    auto_find_all_linears=True,
    train_mode=True,
)
model.print_trainable_parameters()
```
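To narrow this down, here is a quick sanity check I can run right after from_quantized (my own debugging sketch, not part of the example script): it lists every linear-like module of the underlying transformers model, which is what auto_find_all_linears should be discovering.

```python
# Debugging sketch: print every linear-like layer of the wrapped
# transformers model (model.model), e.g. nn.Linear or QuantLinear,
# to see which names target_modules would need to match.
for name, module in model.model.named_modules():
    if "Linear" in type(module).__name__:
        print(name, "->", type(module).__name__)
```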
More about the error:
```
ValueError                                Traceback (most recent call last)
File ./fine_tune_gptq_lora.py:4
      1 #%%
      2 # model.warmup_triton()
      3 device = model.device
----> 4 model = get_gptq_peft_model(model=model,
      5                             peft_config=peft_config,
      6                             auto_find_all_linears=True,
      7                             train_mode=True)
      8 model.print_trainable_parameters()

File ~/Documents/AutoGPTQ/auto_gptq/utils/peft_utils.py:402, in get_gptq_peft_model(model, peft_config, model_id, adapter_name, auto_find_all_linears, train_mode)
    400 try:
    401     if train_mode:
--> 402         peft_model = get_peft_model(model.model, peft_config, adapter_name=adapter_name)
    403     else:
    404         peft_model = PeftModel.from_pretrained(model.model, model_id, adapter_name)

File ~/Documents/.venv_py38/lib/python3.8/site-packages/peft/mapping.py:133, in get_peft_model(model, peft_config, adapter_name, mixed)
    131 if peft_config.is_prompt_learning:
    132     peft_config = _prepare_prompt_learning_config(peft_config, model_config)
--> 133 return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config, adapter_name=adapter_name)

File ~/Documents/.venv_py38/lib/python3.8/site-packages/peft/peft_model.py:1041, in PeftModelForCausalLM.__init__(self, model, peft_config, adapter_name)
   1040 def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
-> 1041     super().__init__(model, peft_config, adapter_name)
   1042     self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation

File ~/Documents/.venv_py38/lib/python3.8/site-packages/peft/peft_model.py:123, in PeftModel.__init__(self, model, peft_config, adapter_name)
    121 self._peft_config = None
    122 cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type]
--> 123 self.base_model = cls(model, {adapter_name: peft_config}, adapter_name)
    124 self.set_additional_trainable_modules(peft_config, adapter_name)
    126 self.config = getattr(self.base_model, "config", {"model_type": "custom"})

File ~/Documents/.venv_py38/lib/python3.8/site-packages/peft/tuners/lora/model.py:119, in LoraModel.__init__(self, model, config, adapter_name)
    118 def __init__(self, model, config, adapter_name) -> None:
--> 119     super().__init__(model, config, adapter_name)

File ~/Documents/.venv_py38/lib/python3.8/site-packages/peft/tuners/tuners_utils.py:95, in BaseTuner.__init__(self, model, peft_config, adapter_name)
     92 if not hasattr(self, "config"):
     93     self.config = {"model_type": "custom"}
---> 95 self.inject_adapter(self.model, adapter_name)
     97 # Copy the peft_config in the injected model.
     98 self.model.peft_config = self.peft_config

File ~/Documents/.venv_py38/lib/python3.8/site-packages/peft/tuners/tuners_utils.py:255, in BaseTuner.inject_adapter(self, model, adapter_name)
    252     self._create_and_replace(peft_config, adapter_name, target, target_name, parent, **optional_kwargs)
    254 if not is_target_modules_in_base_model:
--> 255     raise ValueError(
    256         f"Target modules {peft_config.target_modules} not found in the base model. "
    257         f"Please check the target modules and try again."
    258     )
    260 self._mark_only_adapters_as_trainable()
    262 if self.peft_config[adapter_name].inference_mode:

ValueError: Target modules [] not found in the base model. Please check the target modules and try again.
```
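From the check in tuners_utils.py above, it looks like target_modules ends up as an empty list, so no module name can ever match. A minimal paraphrase of that condition (the module name below is a made-up example):

```python
# Paraphrase of the failing PEFT check: with target_modules == [],
# the inner any() is always False, so PEFT raises the ValueError.
target_modules = []  # what auto-discovery apparently produced here
module_names = ["model.layers.0.self_attn.q_proj"]  # hypothetical key
is_target_modules_in_base_model = any(
    any(name.endswith(target) for target in target_modules)
    for name in module_names
)
print(is_target_modules_in_base_model)  # False
```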