Compare commits

...

10 Commits

Author SHA1 Message Date
DN6
16ebfb7754 update 2025-03-11 08:46:42 +05:30
DN6
53977eedef Merge branch 'main' into torchao-load 2025-03-11 08:10:05 +05:30
DN6
fdf1c11e18 update 2025-03-10 17:10:26 +05:30
DN6
6cf941c69f update 2025-03-10 17:07:54 +05:30
DN6
280a0aca4c update 2025-03-10 17:00:43 +05:30
DN6
9297598dff update 2025-03-10 16:59:02 +05:30
DN6
6a0ae75b55 update 2025-03-10 16:35:49 +05:30
DN6
08b8503ffb update 2025-03-10 16:29:41 +05:30
DN6
56ec287e8a update 2025-03-10 16:02:48 +05:30
Dhruv Nair
8db89e7453 update 2025-03-10 06:55:44 +01:00
5 changed files with 70 additions and 16 deletions

View File

@@ -126,7 +126,7 @@ image = pipe(prompt, num_inference_steps=30, guidance_scale=7.0).images[0]
image.save("output.png") image.save("output.png")
``` ```
Some quantization methods, such as `uint4wo`, cannot be loaded directly and may result in an `UnpicklingError` when trying to load the models, but work as expected when saving them. In order to work around this, one can load the state dict manually into the model. Note, however, that this requires using `weights_only=False` in `torch.load`, so it should be run only if the weights were obtained from a trusted source. If you are using `torch<=2.6.0`, some quantization methods, such as `uint4wo`, cannot be loaded directly and may result in an `UnpicklingError` when trying to load the models, but work as expected when saving them. In order to work around this, one can load the state dict manually into the model. Note, however, that this requires using `weights_only=False` in `torch.load`, so it should be run only if the weights were obtained from a trusted source.
```python ```python
import torch import torch

View File

@@ -2,20 +2,14 @@ __version__ = "0.33.0.dev0"
from typing import TYPE_CHECKING from typing import TYPE_CHECKING
from diffusers.quantizers import quantization_config
from diffusers.utils import dummy_gguf_objects
from diffusers.utils.import_utils import (
is_bitsandbytes_available,
is_gguf_available,
is_optimum_quanto_version,
is_torchao_available,
)
from .utils import ( from .utils import (
DIFFUSERS_SLOW_IMPORT, DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable, OptionalDependencyNotAvailable,
_LazyModule, _LazyModule,
is_accelerate_available,
is_bitsandbytes_available,
is_flax_available, is_flax_available,
is_gguf_available,
is_k_diffusion_available, is_k_diffusion_available,
is_librosa_available, is_librosa_available,
is_note_seq_available, is_note_seq_available,
@@ -24,6 +18,7 @@ from .utils import (
is_scipy_available, is_scipy_available,
is_sentencepiece_available, is_sentencepiece_available,
is_torch_available, is_torch_available,
is_torchao_available,
is_torchsde_available, is_torchsde_available,
is_transformers_available, is_transformers_available,
) )
@@ -65,7 +60,7 @@ _import_structure = {
} }
try: try:
if not is_bitsandbytes_available(): if not is_torch_available() and not is_accelerate_available() and not is_bitsandbytes_available():
raise OptionalDependencyNotAvailable() raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable: except OptionalDependencyNotAvailable:
from .utils import dummy_bitsandbytes_objects from .utils import dummy_bitsandbytes_objects
@@ -77,7 +72,7 @@ else:
_import_structure["quantizers.quantization_config"].append("BitsAndBytesConfig") _import_structure["quantizers.quantization_config"].append("BitsAndBytesConfig")
try: try:
if not is_gguf_available(): if not is_torch_available() and not is_accelerate_available() and not is_gguf_available():
raise OptionalDependencyNotAvailable() raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable: except OptionalDependencyNotAvailable:
from .utils import dummy_gguf_objects from .utils import dummy_gguf_objects
@@ -89,7 +84,7 @@ else:
_import_structure["quantizers.quantization_config"].append("GGUFQuantizationConfig") _import_structure["quantizers.quantization_config"].append("GGUFQuantizationConfig")
try: try:
if not is_torchao_available(): if not is_torch_available() and not is_accelerate_available() and not is_torchao_available():
raise OptionalDependencyNotAvailable() raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable: except OptionalDependencyNotAvailable:
from .utils import dummy_torchao_objects from .utils import dummy_torchao_objects
@@ -101,7 +96,7 @@ else:
_import_structure["quantizers.quantization_config"].append("TorchAoConfig") _import_structure["quantizers.quantization_config"].append("TorchAoConfig")
try: try:
if not is_optimum_quanto_available(): if not is_torch_available() and not is_accelerate_available() and not is_optimum_quanto_available():
raise OptionalDependencyNotAvailable() raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable: except OptionalDependencyNotAvailable:
from .utils import dummy_optimum_quanto_objects from .utils import dummy_optimum_quanto_objects
@@ -112,7 +107,6 @@ except OptionalDependencyNotAvailable:
else: else:
_import_structure["quantizers.quantization_config"].append("QuantoConfig") _import_structure["quantizers.quantization_config"].append("QuantoConfig")
try: try:
if not is_onnx_available(): if not is_onnx_available():
raise OptionalDependencyNotAvailable() raise OptionalDependencyNotAvailable()

View File

@@ -23,7 +23,14 @@ from typing import TYPE_CHECKING, Any, Dict, List, Union
from packaging import version from packaging import version
from ...utils import get_module_from_name, is_torch_available, is_torch_version, is_torchao_available, logging from ...utils import (
get_module_from_name,
is_torch_available,
is_torch_version,
is_torchao_available,
is_torchao_version,
logging,
)
from ..base import DiffusersQuantizer from ..base import DiffusersQuantizer
@@ -62,6 +69,43 @@ if is_torchao_available():
from torchao.quantization import quantize_ from torchao.quantization import quantize_
def _update_torch_safe_globals():
safe_globals = [
(torch.uint1, "torch.uint1"),
(torch.uint2, "torch.uint2"),
(torch.uint3, "torch.uint3"),
(torch.uint4, "torch.uint4"),
(torch.uint5, "torch.uint5"),
(torch.uint6, "torch.uint6"),
(torch.uint7, "torch.uint7"),
]
try:
from torchao.dtypes import NF4Tensor
from torchao.dtypes.floatx.float8_layout import Float8AQTTensorImpl
from torchao.dtypes.uintx.uint4_layout import UInt4Tensor
from torchao.dtypes.uintx.uintx_layout import UintxAQTTensorImpl, UintxTensor
safe_globals.extend([UintxTensor, UInt4Tensor, UintxAQTTensorImpl, Float8AQTTensorImpl, NF4Tensor])
except (ImportError, ModuleNotFoundError) as e:
logger.warning(
"Unable to import `torchao` Tensor objects. This may affect loading checkpoints serialized with `torchao`"
)
logger.debug(e)
finally:
torch.serialization.add_safe_globals(safe_globals=safe_globals)
if (
is_torch_available()
and is_torch_version(">=", "2.6.0")
and is_torchao_available()
and is_torchao_version(">=", "0.7.0")
):
_update_torch_safe_globals()
logger = logging.get_logger(__name__) logger = logging.get_logger(__name__)

View File

@@ -94,6 +94,7 @@ from .import_utils import (
is_torch_xla_available, is_torch_xla_available,
is_torch_xla_version, is_torch_xla_version,
is_torchao_available, is_torchao_available,
is_torchao_version,
is_torchsde_available, is_torchsde_available,
is_torchvision_available, is_torchvision_available,
is_transformers_available, is_transformers_available,

View File

@@ -868,6 +868,21 @@ def is_gguf_version(operation: str, version: str):
return compare_versions(parse(_gguf_version), operation, version) return compare_versions(parse(_gguf_version), operation, version)
def is_torchao_version(operation: str, version: str):
"""
Compares the current torchao version to a given reference with an operation.
Args:
operation (`str`):
A string representation of an operator, such as `">"` or `"<="`
version (`str`):
A version string
"""
if not _is_torchao_available:
return False
return compare_versions(parse(_torchao_version), operation, version)
def is_k_diffusion_version(operation: str, version: str): def is_k_diffusion_version(operation: str, version: str):
""" """
Compares the current k-diffusion version to a given reference with an operation. Compares the current k-diffusion version to a given reference with an operation.