From 5531dddbb0ad7cda516b562bc6226a911c96a78e Mon Sep 17 00:00:00 2001
From: Qubitium
Date: Fri, 28 Feb 2025 15:18:07 +0000
Subject: [PATCH] revert

---
 src/peft/tuners/lora/gptq.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/peft/tuners/lora/gptq.py b/src/peft/tuners/lora/gptq.py
index 369f445fae..d16cfffb58 100644
--- a/src/peft/tuners/lora/gptq.py
+++ b/src/peft/tuners/lora/gptq.py
@@ -115,8 +115,11 @@ def dispatch_gptq(
     cfg = kwargs.get("gptq_quantization_config", None)
 
     if is_gptqmodel_available():
-        new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
-        target.qweight = target_base_layer.qweight
+        from gptqmodel.nn_modules.qlinear import BaseQuantLinear
+
+        if isinstance(target_base_layer, BaseQuantLinear):
+            new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
+            target.qweight = target_base_layer.qweight
     else:
         quant_linear = get_auto_gptq_quant_linear(cfg)
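
Reviewer note: to exercise the guarded dispatch path above, here is a minimal usage sketch. It assumes gptqmodel (and optimum) are installed so that is_gptqmodel_available() returns True; the model id below is a placeholder for any GPTQ-quantized checkpoint, not a real repository. dispatch_gptq is invoked indirectly when get_peft_model wraps each targeted quantized linear layer.

    # Hedged sketch: the checkpoint name is a placeholder, not a real model id.
    from transformers import AutoModelForCausalLM
    from peft import LoraConfig, get_peft_model

    # Load a GPTQ-quantized checkpoint; with gptqmodel installed, its quantized
    # linears subclass BaseQuantLinear, so the isinstance guard added in this
    # patch selects the GPTQLoraLinear wrapper for these target modules.
    model = AutoModelForCausalLM.from_pretrained(
        "some-org/some-model-GPTQ",  # placeholder GPTQ-quantized checkpoint
        device_map="auto",
    )
    config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
    peft_model = get_peft_model(model, config)  # triggers dispatch_gptq per module
    peft_model.print_trainable_parameters()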