From 9caf45fe3e64584df3e20dcdfeab7f89246a4925 Mon Sep 17 00:00:00 2001
From: Qubitium
Date: Fri, 28 Feb 2025 15:15:24 +0000
Subject: [PATCH] cleanup

---
 src/peft/tuners/lora/gptq.py | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/src/peft/tuners/lora/gptq.py b/src/peft/tuners/lora/gptq.py
index d16cfffb58..369f445fae 100644
--- a/src/peft/tuners/lora/gptq.py
+++ b/src/peft/tuners/lora/gptq.py
@@ -115,11 +115,8 @@ def dispatch_gptq(
     cfg = kwargs.get("gptq_quantization_config", None)
 
     if is_gptqmodel_available():
-        from gptqmodel.nn_modules.qlinear import BaseQuantLinear
-
-        if isinstance(target_base_layer, BaseQuantLinear):
-            new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
-            target.qweight = target_base_layer.qweight
+        new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
+        target.qweight = target_base_layer.qweight
     else:
         quant_linear = get_auto_gptq_quant_linear(cfg)
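
Note: after this change, the gptqmodel branch of dispatch_gptq no longer imports BaseQuantLinear or gates on isinstance(target_base_layer, BaseQuantLinear); whenever gptqmodel is available, the target is wrapped in GPTQLoraLinear unconditionally and the base layer's qweight is re-exposed on the tuner layer. The sketch below is a self-contained reconstruction of the patched control flow, not the real module: is_gptqmodel_available, GPTQLoraLinear, and the demo layer are local stand-ins (assumptions) kept only so the branch can run in isolation.

    from typing import Any, Optional

    import torch


    def is_gptqmodel_available() -> bool:
        # Stand-in for peft's real availability check (assumption).
        return True


    class GPTQLoraLinear(torch.nn.Module):
        # Stand-in for the LoRA wrapper defined in src/peft/tuners/lora/gptq.py.
        def __init__(self, base_layer: torch.nn.Module, adapter_name: str, **kwargs: Any) -> None:
            super().__init__()
            self.base_layer = base_layer
            self.adapter_name = adapter_name


    def dispatch_gptq(target: torch.nn.Module, adapter_name: str, **kwargs: Any) -> Optional[torch.nn.Module]:
        # Mirrors the patched branch: no BaseQuantLinear isinstance gate.
        target_base_layer = getattr(target, "get_base_layer", lambda: target)()
        new_module = None
        if is_gptqmodel_available():
            new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
            # Re-expose the quantized weight on the tuner layer, as in the diff.
            target.qweight = target_base_layer.qweight
        return new_module


    # Demo: a fake quantized layer carrying a packed-int qweight buffer.
    layer = torch.nn.Linear(4, 4)
    layer.qweight = torch.zeros(4, 1, dtype=torch.int32)
    wrapped = dispatch_gptq(layer, "default")
    print(type(wrapped).__name__)  # -> GPTQLoraLinear

One consequence of dropping the isinstance gate is that any target reaching this branch is wrapped, so the caller is trusted to route only GPTQ-quantized layers here; the auto-gptq fallback path (get_auto_gptq_quant_linear) is untouched.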