Commit 9caf45f: cleanup
Qubitium committed Feb 28, 2025
1 parent fbf909f commit 9caf45f
Showing 1 changed file with 2 additions and 5 deletions.
src/peft/tuners/lora/gptq.py (7 changes: 2 additions & 5 deletions)
@@ -115,11 +115,8 @@ def dispatch_gptq(
     cfg = kwargs.get("gptq_quantization_config", None)

     if is_gptqmodel_available():
-        from gptqmodel.nn_modules.qlinear import BaseQuantLinear
-
-        if isinstance(target_base_layer, BaseQuantLinear):
-            new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
-            target.qweight = target_base_layer.qweight
+        new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
+        target.qweight = target_base_layer.qweight
     else:
         quant_linear = get_auto_gptq_quant_linear(cfg)

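For readers skimming the hunk, the sketch below spells out how the gptqmodel branch reads after this cleanup: the BaseQuantLinear import and isinstance guard are gone, and the layer is wrapped directly. Only the if/else bodies come from the diff above; the function signature, the target_base_layer resolution, the return of new_module, and the name dispatch_gptq_sketch are assumptions made here for illustration, not part of this commit or necessarily of the surrounding file.

```python
# Hypothetical sketch only: the gptqmodel dispatch branch after this commit.
# The if/else bodies mirror the hunk above; everything else (signature,
# target_base_layer resolution, returning new_module) is assumed context.
def dispatch_gptq_sketch(target, adapter_name, **kwargs):
    new_module = None

    # Assumption: PEFT tuner layers expose the wrapped module via get_base_layer().
    target_base_layer = target.get_base_layer() if hasattr(target, "get_base_layer") else target

    cfg = kwargs.get("gptq_quantization_config", None)

    if is_gptqmodel_available():
        # Post-commit: no BaseQuantLinear import and no isinstance guard;
        # the layer is wrapped directly and its qweight mirrored onto the wrapper.
        new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
        target.qweight = target_base_layer.qweight
    else:
        # auto-gptq fallback; only its first line is visible in this hunk.
        quant_linear = get_auto_gptq_quant_linear(cfg)
        # (rest of the fallback lies outside the diff)

    return new_module
```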