
Commit

revert
Qubitium committed Feb 28, 2025
1 parent 9caf45f commit 5531ddd
Showing 1 changed file with 5 additions and 2 deletions.
7 changes: 5 additions & 2 deletions src/peft/tuners/lora/gptq.py
@@ -115,8 +115,11 @@ def dispatch_gptq(
     cfg = kwargs.get("gptq_quantization_config", None)
 
     if is_gptqmodel_available():
-        new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
-        target.qweight = target_base_layer.qweight
+        from gptqmodel.nn_modules.qlinear import BaseQuantLinear
+
+        if isinstance(target_base_layer, BaseQuantLinear):
+            new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
+            target.qweight = target_base_layer.qweight
     else:
         quant_linear = get_auto_gptq_quant_linear(cfg)
 
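For readability, a minimal sketch of how the guarded branch reads in context after this revert. Only the lines inside the hunk above are taken from the commit; the function signature, the base-layer lookup, the auto-gptq fallback check, and the trailing return are assumptions about the surrounding dispatch_gptq and are not part of this diff.

# Sketch only, not the upstream implementation. is_gptqmodel_available,
# GPTQLoraLinear and get_auto_gptq_quant_linear are module-level names in
# src/peft/tuners/lora/gptq.py per the diff context; everything outside the
# hunk (signature, base-layer lookup, fallback check, return) is assumed.
def dispatch_gptq(target, adapter_name, **kwargs):
    new_module = None

    # Assumed: tuner layers expose the wrapped module via get_base_layer().
    target_base_layer = target.get_base_layer() if hasattr(target, "get_base_layer") else target

    cfg = kwargs.get("gptq_quantization_config", None)

    if is_gptqmodel_available():
        from gptqmodel.nn_modules.qlinear import BaseQuantLinear

        # Restored guard: only wrap layers that gptqmodel actually quantized.
        if isinstance(target_base_layer, BaseQuantLinear):
            new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
            target.qweight = target_base_layer.qweight
    else:
        # auto-gptq path: resolve the quant-linear class from the quantization
        # config and wrap only matching layers (assumed continuation of the hunk).
        quant_linear = get_auto_gptq_quant_linear(cfg)
        if quant_linear is not None and isinstance(target_base_layer, quant_linear):
            new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
            target.qweight = target_base_layer.qweight

    return new_module

The practical effect of the revert is that, with gptqmodel installed, GPTQLoraLinear is once again created only for layers that are instances of gptqmodel's BaseQuantLinear, instead of unconditionally for every dispatched target.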
