[Quantizers] add is_compileable property to quantizers. (#11736)

Author: Sayak Paul
Date: 2025-06-19 07:45:06 +05:30
Committed by: GitHub
Parent: 66394bf6c7
Commit: 48eae6f420
5 changed files with 21 additions and 0 deletions


@@ -227,3 +227,8 @@ class DiffusersQuantizer(ABC):
    @property
    @abstractmethod
    def is_trainable(self): ...

    @property
    def is_compileable(self) -> bool:
        """Flag indicating whether the quantized model can be compiled"""
        return False
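The base class defaults to False, so compilation is only attempted for backends that explicitly opt in. A minimal sketch of how a caller might consume this flag; the `maybe_compile` helper and its signature are hypothetical and not part of this commit:

import torch

def maybe_compile(model, quantizer=None):
    # Hypothetical helper: compile only when no quantizer is attached, or
    # when the attached quantizer reports that its modules are compileable.
    if quantizer is not None and not quantizer.is_compileable:
        return model  # keep eager execution for non-compileable backends
    return torch.compile(model)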


@@ -564,6 +564,10 @@ class BnB8BitDiffusersQuantizer(DiffusersQuantizer):
        # Because we're mandating `bitsandbytes` 0.43.3.
        return True

    @property
    def is_compileable(self) -> bool:
        return True

    def _dequantize(self, model):
        from .utils import dequantize_and_replace


@@ -146,6 +146,10 @@ class GGUFQuantizer(DiffusersQuantizer):
    def is_trainable(self) -> bool:
        return False

    @property
    def is_compileable(self) -> bool:
        return True

    def _dequantize(self, model):
        is_model_on_cpu = model.device.type == "cpu"
        if is_model_on_cpu:


@@ -175,3 +175,7 @@ class QuantoQuantizer(DiffusersQuantizer):
    @property
    def is_serializable(self):
        return True

    @property
    def is_compileable(self) -> bool:
        return True


@@ -335,3 +335,7 @@ class TorchAoHfQuantizer(DiffusersQuantizer):
    @property
    def is_trainable(self):
        return self.quantization_config.quant_type.startswith("int8")

    @property
    def is_compileable(self) -> bool:
        return True
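Taken together, the pattern is: the abstract base advertises a conservative default of False, and each backend (bitsandbytes 8-bit, GGUF, Quanto, torchao) opts in by overriding the property. A self-contained toy illustration of that opt-in pattern; the class names are illustrative, not diffusers code:

class ToyQuantizer:
    # Mirrors the base-class behavior: not compileable unless a backend opts in.
    @property
    def is_compileable(self) -> bool:
        return False

class ToyGGUFQuantizer(ToyQuantizer):
    # A backend that supports compilation overrides the property.
    @property
    def is_compileable(self) -> bool:
        return True

assert ToyQuantizer().is_compileable is False
assert ToyGGUFQuantizer().is_compileable is True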