Mirror of https://github.com/huggingface/diffusers.git, synced 2026-03-14 04:27:55 +08:00.

Compare commits: 4 commits, `ltx-test-r` ... `z-image-te`

| Author | SHA1 | Date |
|---|---|---|
|  | 73b23dc92e |  |
|  | c15472d2c4 |  |
|  | d15761686a |  |
|  | 5352999e14 |  |
Quantization config module: the `QuantoConfig` deprecation notice is removed, along with the now-unused `deprecate` import.

```diff
@@ -36,7 +36,7 @@ from typing import Any, Callable

 from packaging import version

-from ..utils import deprecate, is_torch_available, is_torchao_available, is_torchao_version, logging
+from ..utils import is_torch_available, is_torchao_available, is_torchao_version, logging


 if is_torch_available():
@@ -844,8 +844,6 @@ class QuantoConfig(QuantizationConfigMixin):
         modules_to_not_convert: list[str] | None = None,
         **kwargs,
     ):
-        deprecation_message = "`QuantoConfig` is deprecated and will be removed in version 1.0.0."
-        deprecate("QuantoConfig", "1.0.0", deprecation_message)
        self.quant_method = QuantizationMethod.QUANTO
        self.weights_dtype = weights_dtype
        self.modules_to_not_convert = modules_to_not_convert
```
Quanto quantizer module: the matching deprecation in `QuantoQuantizer.validate_environment` is removed.

```diff
@@ -3,7 +3,6 @@ from typing import TYPE_CHECKING, Any
 from diffusers.utils.import_utils import is_optimum_quanto_version

 from ...utils import (
-    deprecate,
     get_module_from_name,
     is_accelerate_available,
     is_accelerate_version,
@@ -43,9 +42,6 @@ class QuantoQuantizer(DiffusersQuantizer):
         super().__init__(quantization_config, **kwargs)

     def validate_environment(self, *args, **kwargs):
-        deprecation_message = "The Quanto quantizer is deprecated and will be removed in version 1.0.0."
-        deprecate("QuantoQuantizer", "1.0.0", deprecation_message)
-
         if not is_optimum_quanto_available():
             raise ImportError(
                 "Loading an optimum-quanto quantized model requires optimum-quanto library (`pip install optimum-quanto`)"
```
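Both hunks delete the same two-line pattern: build a message, then call `deprecate(name, removal_version, message)`. A minimal sketch of what that call does at runtime; this is illustrative, not the actual `diffusers.utils` implementation, which also raises once the installed version reaches the removal version:

```python
import warnings


def deprecate(name: str, removal_version: str, message: str) -> None:
    # Sketch: emit a FutureWarning attributed to the caller. The real
    # diffusers helper additionally errors out when the installed package
    # version is already past `removal_version`.
    warnings.warn(f"{name}: {message}", FutureWarning, stacklevel=2)


deprecate(
    "QuantoConfig",
    "1.0.0",
    "`QuantoConfig` is deprecated and will be removed in version 1.0.0.",
)
```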
LTX transformer test module: the `BaseModelTesterConfig`-plus-mixins layout (removed lines) gives way to the `unittest.TestCase` layout built on `test_modeling_common` (added lines).

```diff
@@ -1,3 +1,4 @@
+# coding=utf-8
 # Copyright 2025 HuggingFace Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,47 +13,59 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import unittest
+
 import torch

 from diffusers import LTXVideoTransformer3DModel
-from diffusers.utils.torch_utils import randn_tensor

 from ...testing_utils import enable_full_determinism, torch_device
-from ..testing_utils import (
-    BaseModelTesterConfig,
-    MemoryTesterMixin,
-    ModelTesterMixin,
-    TorchCompileTesterMixin,
-    TrainingTesterMixin,
-)
+from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin


 enable_full_determinism()


-class LTXTransformerTesterConfig(BaseModelTesterConfig):
-    @property
-    def model_class(self):
-        return LTXVideoTransformer3DModel
+class LTXTransformerTests(ModelTesterMixin, unittest.TestCase):
+    model_class = LTXVideoTransformer3DModel
+    main_input_name = "hidden_states"
+    uses_custom_attn_processor = True

     @property
-    def output_shape(self) -> tuple[int, int]:
-        return (512, 4)
-
-    @property
-    def input_shape(self) -> tuple[int, int]:
-        return (512, 4)
-
-    @property
-    def main_input_name(self) -> str:
-        return "hidden_states"
-
-    @property
-    def generator(self):
-        return torch.Generator("cpu").manual_seed(0)
-
-    def get_init_dict(self):
-        return {
+    def dummy_input(self):
+        batch_size = 2
+        num_channels = 4
+        num_frames = 2
+        height = 16
+        width = 16
+        embedding_dim = 16
+        sequence_length = 16
+
+        hidden_states = torch.randn((batch_size, num_frames * height * width, num_channels)).to(torch_device)
+        encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
+        encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device)
+        timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
+
+        return {
+            "hidden_states": hidden_states,
+            "encoder_hidden_states": encoder_hidden_states,
+            "timestep": timestep,
+            "encoder_attention_mask": encoder_attention_mask,
+            "num_frames": num_frames,
+            "height": height,
+            "width": width,
+        }
+
+    @property
+    def input_shape(self):
+        return (512, 4)
+
+    @property
+    def output_shape(self):
+        return (512, 4)
+
+    def prepare_init_args_and_inputs_for_common(self):
+        init_dict = {
             "in_channels": 4,
             "out_channels": 4,
             "num_attention_heads": 2,
@@ -62,57 +75,16 @@ class LTXTransformerTesterConfig(BaseModelTesterConfig):
             "qk_norm": "rms_norm_across_heads",
             "caption_channels": 16,
         }
-
-    def get_dummy_inputs(self) -> dict[str, torch.Tensor]:
-        batch_size = 2
-        num_channels = 4
-        num_frames = 2
-        height = 16
-        width = 16
-        embedding_dim = 16
-        sequence_length = 16
-
-        return {
-            "hidden_states": randn_tensor(
-                (batch_size, num_frames * height * width, num_channels),
-                generator=self.generator,
-                device=torch_device,
-            ),
-            "encoder_hidden_states": randn_tensor(
-                (batch_size, sequence_length, embedding_dim), generator=self.generator, device=torch_device
-            ),
-            "timestep": torch.randint(0, 1000, size=(batch_size,), generator=self.generator).to(torch_device),
-            "encoder_attention_mask": torch.ones((batch_size, sequence_length)).bool().to(torch_device),
-            "num_frames": num_frames,
-            "height": height,
-            "width": width,
-        }
-
-
-class TestLTXTransformer(LTXTransformerTesterConfig, ModelTesterMixin):
-    """Core model tests for LTX Video Transformer."""
-
-
-class TestLTXTransformerMemory(LTXTransformerTesterConfig, MemoryTesterMixin):
-    """Memory optimization tests for LTX Video Transformer."""
-
-
-class TestLTXTransformerTraining(LTXTransformerTesterConfig, TrainingTesterMixin):
-    """Training tests for LTX Video Transformer."""
+        inputs_dict = self.dummy_input
+        return init_dict, inputs_dict

     def test_gradient_checkpointing_is_applied(self):
-        super().test_gradient_checkpointing_is_applied(expected_set={"LTXVideoTransformer3DModel"})
+        expected_set = {"LTXVideoTransformer3DModel"}
+        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)


-class TestLTXTransformerCompile(LTXTransformerTesterConfig, TorchCompileTesterMixin):
-    """Torch compile tests for LTX Video Transformer."""
-
-
-# TODO: Add pretrained_model_name_or_path once a tiny LTX model is available on the Hub
-# class TestLTXTransformerBitsAndBytes(LTXTransformerTesterConfig, BitsAndBytesTesterMixin):
-#     """BitsAndBytes quantization tests for LTX Video Transformer."""
-
-
-# TODO: Add pretrained_model_name_or_path once a tiny LTX model is available on the Hub
-# class TestLTXTransformerTorchAo(LTXTransformerTesterConfig, TorchAoTesterMixin):
-#     """TorchAo quantization tests for LTX Video Transformer."""
+class LTXTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
+    model_class = LTXVideoTransformer3DModel
+
+    def prepare_init_args_and_inputs_for_common(self):
+        return LTXTransformerTests().prepare_init_args_and_inputs_for_common()
```
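The removed side of this diff composes one shared config class with focused test mixins, while the added side folds everything into a single `unittest.TestCase`. A minimal, self-contained sketch of the config-plus-mixin composition; the class and method names here are illustrative, not the diffusers test API:

```python
class BaseModelTesterConfig:
    """Shared fixture surface; concrete configs override these hooks."""

    def get_init_dict(self) -> dict:
        raise NotImplementedError


class SmokeTesterMixin:
    """Reusable tests written only against the config surface above."""

    def test_init_dict_is_a_dict(self):
        assert isinstance(self.get_init_dict(), dict)


class MyModelTesterConfig(BaseModelTesterConfig):
    def get_init_dict(self) -> dict:
        return {"in_channels": 4}


# pytest collects this class because its name starts with "Test"; it
# inherits fixtures from the config and test methods from the mixin.
class TestMyModelSmoke(MyModelTesterConfig, SmokeTesterMixin):
    """Smoke tests composed from config + mixin."""
```

Each mixin (memory, training, compile, quantization) can then be paired with the same config class without duplicating fixtures.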
LTX2 transformer test module: the same replacement, plus a large commented-out consistency test on the added side.

```diff
@@ -1,3 +1,4 @@
+# coding=utf-8
 # Copyright 2025 HuggingFace Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,49 +13,77 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import pytest
+import unittest

 import torch

 from diffusers import LTX2VideoTransformer3DModel
-from diffusers.utils.torch_utils import randn_tensor

 from ...testing_utils import enable_full_determinism, torch_device
-from ..testing_utils import (
-    AttentionTesterMixin,
-    BaseModelTesterConfig,
-    MemoryTesterMixin,
-    ModelTesterMixin,
-    TorchCompileTesterMixin,
-    TrainingTesterMixin,
-)
+from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin


 enable_full_determinism()


-class LTX2TransformerTesterConfig(BaseModelTesterConfig):
-    @property
-    def model_class(self):
-        return LTX2VideoTransformer3DModel
+class LTX2TransformerTests(ModelTesterMixin, unittest.TestCase):
+    model_class = LTX2VideoTransformer3DModel
+    main_input_name = "hidden_states"
+    uses_custom_attn_processor = True

     @property
-    def output_shape(self) -> tuple[int, int]:
-        return (512, 4)
-
-    @property
-    def input_shape(self) -> tuple[int, int]:
-        return (512, 4)
-
-    @property
-    def main_input_name(self) -> str:
-        return "hidden_states"
-
-    @property
-    def generator(self):
-        return torch.Generator("cpu").manual_seed(0)
-
-    def get_init_dict(self):
-        return {
+    def dummy_input(self):
+        # Common
+        batch_size = 2
+
+        # Video
+        num_frames = 2
+        num_channels = 4
+        height = 16
+        width = 16
+
+        # Audio
+        audio_num_frames = 9
+        audio_num_channels = 2
+        num_mel_bins = 2
+
+        # Text
+        embedding_dim = 16
+        sequence_length = 16
+
+        hidden_states = torch.randn((batch_size, num_frames * height * width, num_channels)).to(torch_device)
+        audio_hidden_states = torch.randn((batch_size, audio_num_frames, audio_num_channels * num_mel_bins)).to(
+            torch_device
+        )
+        encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
+        audio_encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
+        encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device)
+        timestep = torch.rand((batch_size,)).to(torch_device) * 1000
+
+        return {
+            "hidden_states": hidden_states,
+            "audio_hidden_states": audio_hidden_states,
+            "encoder_hidden_states": encoder_hidden_states,
+            "audio_encoder_hidden_states": audio_encoder_hidden_states,
+            "timestep": timestep,
+            "encoder_attention_mask": encoder_attention_mask,
+            "num_frames": num_frames,
+            "height": height,
+            "width": width,
+            "audio_num_frames": audio_num_frames,
+            "fps": 25.0,
+        }
+
+    @property
+    def input_shape(self):
+        return (512, 4)
+
+    @property
+    def output_shape(self):
+        return (512, 4)
+
+    def prepare_init_args_and_inputs_for_common(self):
+        init_dict = {
             "in_channels": 4,
             "out_channels": 4,
             "patch_size": 1,
@@ -72,80 +101,122 @@ class LTX2TransformerTesterConfig(BaseModelTesterConfig):
             "caption_channels": 16,
             "rope_double_precision": False,
         }
-
-    def get_dummy_inputs(self) -> dict[str, torch.Tensor]:
-        batch_size = 2
-        num_frames = 2
-        num_channels = 4
-        height = 16
-        width = 16
-        audio_num_frames = 9
-        audio_num_channels = 2
-        num_mel_bins = 2
-        embedding_dim = 16
-        sequence_length = 16
-
-        return {
-            "hidden_states": randn_tensor(
-                (batch_size, num_frames * height * width, num_channels),
-                generator=self.generator,
-                device=torch_device,
-            ),
-            "audio_hidden_states": randn_tensor(
-                (batch_size, audio_num_frames, audio_num_channels * num_mel_bins),
-                generator=self.generator,
-                device=torch_device,
-            ),
-            "encoder_hidden_states": randn_tensor(
-                (batch_size, sequence_length, embedding_dim), generator=self.generator, device=torch_device
-            ),
-            "audio_encoder_hidden_states": randn_tensor(
-                (batch_size, sequence_length, embedding_dim), generator=self.generator, device=torch_device
-            ),
-            "timestep": (randn_tensor((batch_size,), generator=self.generator, device=torch_device).abs() * 1000),
-            "encoder_attention_mask": torch.ones((batch_size, sequence_length)).bool().to(torch_device),
-            "num_frames": num_frames,
-            "height": height,
-            "width": width,
-            "audio_num_frames": audio_num_frames,
-            "fps": 25.0,
-        }
-
-
-class TestLTX2Transformer(LTX2TransformerTesterConfig, ModelTesterMixin):
-    """Core model tests for LTX2 Video Transformer."""
-
-
-class TestLTX2TransformerMemory(LTX2TransformerTesterConfig, MemoryTesterMixin):
-    """Memory optimization tests for LTX2 Video Transformer."""
-
-
-class TestLTX2TransformerTraining(LTX2TransformerTesterConfig, TrainingTesterMixin):
-    """Training tests for LTX2 Video Transformer."""
+        inputs_dict = self.dummy_input
+        return init_dict, inputs_dict

     def test_gradient_checkpointing_is_applied(self):
-        super().test_gradient_checkpointing_is_applied(expected_set={"LTX2VideoTransformer3DModel"})
-
-
-class TestLTX2TransformerAttention(LTX2TransformerTesterConfig, AttentionTesterMixin):
-    """Attention processor tests for LTX2 Video Transformer."""
-
-    @pytest.mark.skip(
-        "LTX2Attention does not set is_cross_attention, so fuse_projections tries to fuse Q+K+V together even for cross-attention modules with different input dimensions."
-    )
-    def test_fuse_unfuse_qkv_projections(self, atol=1e-3, rtol=0):
-        pass
-
-
-class TestLTX2TransformerCompile(LTX2TransformerTesterConfig, TorchCompileTesterMixin):
-    """Torch compile tests for LTX2 Video Transformer."""
-
-
-# TODO: Add pretrained_model_name_or_path once a tiny LTX2 model is available on the Hub
-# class TestLTX2TransformerBitsAndBytes(LTX2TransformerTesterConfig, BitsAndBytesTesterMixin):
-#     """BitsAndBytes quantization tests for LTX2 Video Transformer."""
-
-
-# TODO: Add pretrained_model_name_or_path once a tiny LTX2 model is available on the Hub
-# class TestLTX2TransformerTorchAo(LTX2TransformerTesterConfig, TorchAoTesterMixin):
-#     """TorchAo quantization tests for LTX2 Video Transformer."""
+        expected_set = {"LTX2VideoTransformer3DModel"}
+        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
+
+    # def test_ltx2_consistency(self, seed=0, dtype=torch.float32):
+    #     torch.manual_seed(seed)
+    #     init_dict, _ = self.prepare_init_args_and_inputs_for_common()
+
+    #     # Calculate dummy inputs in a custom manner to ensure compatibility with original code
+    #     batch_size = 2
+    #     num_frames = 9
+    #     latent_frames = 2
+    #     text_embedding_dim = 16
+    #     text_seq_len = 16
+    #     fps = 25.0
+    #     sampling_rate = 16000.0
+    #     hop_length = 160.0
+
+    #     sigma = torch.rand((1,), generator=torch.manual_seed(seed), dtype=dtype, device="cpu") * 1000
+    #     timestep = (sigma * torch.ones((batch_size,), dtype=dtype, device="cpu")).to(device=torch_device)
+
+    #     num_channels = 4
+    #     latent_height = 4
+    #     latent_width = 4
+    #     hidden_states = torch.randn(
+    #         (batch_size, num_channels, latent_frames, latent_height, latent_width),
+    #         generator=torch.manual_seed(seed),
+    #         dtype=dtype,
+    #         device="cpu",
+    #     )
+    #     # Patchify video latents (with patch_size (1, 1, 1))
+    #     hidden_states = hidden_states.reshape(batch_size, -1, latent_frames, 1, latent_height, 1, latent_width, 1)
+    #     hidden_states = hidden_states.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7).flatten(1, 3)
+    #     encoder_hidden_states = torch.randn(
+    #         (batch_size, text_seq_len, text_embedding_dim),
+    #         generator=torch.manual_seed(seed),
+    #         dtype=dtype,
+    #         device="cpu",
+    #     )
+
+    #     audio_num_channels = 2
+    #     num_mel_bins = 2
+    #     latent_length = int((sampling_rate / hop_length / 4) * (num_frames / fps))
+    #     audio_hidden_states = torch.randn(
+    #         (batch_size, audio_num_channels, latent_length, num_mel_bins),
+    #         generator=torch.manual_seed(seed),
+    #         dtype=dtype,
+    #         device="cpu",
+    #     )
+    #     # Patchify audio latents
+    #     audio_hidden_states = audio_hidden_states.transpose(1, 2).flatten(2, 3)
+    #     audio_encoder_hidden_states = torch.randn(
+    #         (batch_size, text_seq_len, text_embedding_dim),
+    #         generator=torch.manual_seed(seed),
+    #         dtype=dtype,
+    #         device="cpu",
+    #     )
+
+    #     inputs_dict = {
+    #         "hidden_states": hidden_states.to(device=torch_device),
+    #         "audio_hidden_states": audio_hidden_states.to(device=torch_device),
+    #         "encoder_hidden_states": encoder_hidden_states.to(device=torch_device),
+    #         "audio_encoder_hidden_states": audio_encoder_hidden_states.to(device=torch_device),
+    #         "timestep": timestep,
+    #         "num_frames": latent_frames,
+    #         "height": latent_height,
+    #         "width": latent_width,
+    #         "audio_num_frames": num_frames,
+    #         "fps": 25.0,
+    #     }
+
+    #     model = self.model_class.from_pretrained(
+    #         "diffusers-internal-dev/dummy-ltx2",
+    #         subfolder="transformer",
+    #         device_map="cpu",
+    #     )
+    #     # torch.manual_seed(seed)
+    #     # model = self.model_class(**init_dict)
+    #     model.to(torch_device)
+    #     model.eval()
+
+    #     with attention_backend("native"):
+    #         with torch.no_grad():
+    #             output = model(**inputs_dict)
+
+    #     video_output, audio_output = output.to_tuple()
+
+    #     self.assertIsNotNone(video_output)
+    #     self.assertIsNotNone(audio_output)
+
+    #     # input & output have to have the same shape
+    #     video_expected_shape = (batch_size, latent_frames * latent_height * latent_width, num_channels)
+    #     self.assertEqual(video_output.shape, video_expected_shape, "Video input and output shapes do not match")
+    #     audio_expected_shape = (batch_size, latent_length, audio_num_channels * num_mel_bins)
+    #     self.assertEqual(audio_output.shape, audio_expected_shape, "Audio input and output shapes do not match")
+
+    #     # Check against expected slice
+    #     # fmt: off
+    #     video_expected_slice = torch.tensor([0.4783, 1.6954, -1.2092, 0.1762, 0.7801, 1.2025, -1.4525, -0.2721, 0.3354, 1.9144, -1.5546, 0.0831, 0.4391, 1.7012, -1.7373, -0.2676])
+    #     audio_expected_slice = torch.tensor([-0.4236, 0.4750, 0.3901, -0.4339, -0.2782, 0.4357, 0.4526, -0.3927, -0.0980, 0.4870, 0.3964, -0.3169, -0.3974, 0.4408, 0.3809, -0.4692])
+    #     # fmt: on
+
+    #     video_output_flat = video_output.cpu().flatten().float()
+    #     video_generated_slice = torch.cat([video_output_flat[:8], video_output_flat[-8:]])
+    #     self.assertTrue(torch.allclose(video_generated_slice, video_expected_slice, atol=1e-4))
+
+    #     audio_output_flat = audio_output.cpu().flatten().float()
+    #     audio_generated_slice = torch.cat([audio_output_flat[:8], audio_output_flat[-8:]])
+    #     self.assertTrue(torch.allclose(audio_generated_slice, audio_expected_slice, atol=1e-4))
+
+
+class LTX2TransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
+    model_class = LTX2VideoTransformer3DModel
+
+    def prepare_init_args_and_inputs_for_common(self):
+        return LTX2TransformerTests().prepare_init_args_and_inputs_for_common()
```
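The commented-out consistency test patchifies video latents by hand before feeding them to the transformer. A small runnable sketch of what that reshape/permute/flatten sequence does for a patch size of (1, 1, 1), using the same dimension roles as the commented code:

```python
import torch

batch, channels, frames, height, width = 2, 4, 2, 4, 4
latents = torch.randn(batch, channels, frames, height, width)

# Insert singleton patch axes after each spatio-temporal axis, move the
# channel (and patch) axes to the end, then flatten into a token sequence
# of shape (batch, frames * height * width, channels).
x = latents.reshape(batch, -1, frames, 1, height, 1, width, 1)
x = x.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7).flatten(1, 3)

assert x.shape == (batch, frames * height * width, channels)
```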
Z-Image transformer test module, going the other way: the `unittest.TestCase` layout is replaced with the `BaseModelTesterConfig`-plus-mixins layout, and a module-level `_concat_list_output` helper is added.

```diff
@@ -1,4 +1,3 @@
-# coding=utf-8
 # Copyright 2025 HuggingFace Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,16 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import gc
 import os
-import unittest
+
+import pytest
 import torch

 from diffusers import ZImageTransformer2DModel
 from diffusers.utils.torch_utils import randn_tensor

-from ...testing_utils import IS_GITHUB_ACTIONS, torch_device
-from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
+from ...testing_utils import assert_tensors_close, torch_device
+from ..testing_utils import (
+    BaseModelTesterConfig,
+    LoraTesterMixin,
+    MemoryTesterMixin,
+    ModelTesterMixin,
+    TorchCompileTesterMixin,
+    TrainingTesterMixin,
+)


 # Z-Image requires torch.use_deterministic_algorithms(False) due to complex64 RoPE operations
@@ -36,44 +42,38 @@ if hasattr(torch.backends, "cuda"):
     torch.backends.cuda.matmul.allow_tf32 = False


-@unittest.skipIf(
-    IS_GITHUB_ACTIONS,
-    reason="Skipping test-suite inside the CI because the model has `torch.empty()` inside of it during init and we don't have a clear way to override it in the modeling tests.",
-)
-class ZImageTransformerTests(ModelTesterMixin, unittest.TestCase):
-    model_class = ZImageTransformer2DModel
-    main_input_name = "x"
-    # We override the items here because the transformer under consideration is small.
-    model_split_percents = [0.9, 0.9, 0.9]
-
-    def prepare_dummy_input(self, height=16, width=16):
-        batch_size = 1
-        num_channels = 16
-        embedding_dim = 16
-        sequence_length = 16
-
-        hidden_states = [torch.randn((num_channels, 1, height, width)).to(torch_device) for _ in range(batch_size)]
-        encoder_hidden_states = [
-            torch.randn((sequence_length, embedding_dim)).to(torch_device) for _ in range(batch_size)
-        ]
-        timestep = torch.tensor([0.0]).to(torch_device)
-
-        return {"x": hidden_states, "cap_feats": encoder_hidden_states, "t": timestep}
-
-    @property
-    def dummy_input(self):
-        return self.prepare_dummy_input()
-
-    @property
-    def input_shape(self):
-        return (4, 32, 32)
-
-    @property
-    def output_shape(self):
-        return (4, 32, 32)
-
-    def prepare_init_args_and_inputs_for_common(self):
-        init_dict = {
+def _concat_list_output(output):
+    """Model output `sample` is a list of tensors. Concatenate them for comparison."""
+    return torch.cat([t.flatten() for t in output])
+
+
+class ZImageTransformerTesterConfig(BaseModelTesterConfig):
+    @property
+    def model_class(self):
+        return ZImageTransformer2DModel
+
+    @property
+    def output_shape(self) -> tuple[int, ...]:
+        return (4, 32, 32)
+
+    @property
+    def input_shape(self) -> tuple[int, ...]:
+        return (4, 32, 32)
+
+    @property
+    def model_split_percents(self) -> list:
+        return [0.9, 0.9, 0.9]
+
+    @property
+    def main_input_name(self) -> str:
+        return "x"
+
+    @property
+    def generator(self):
+        return torch.Generator("cpu").manual_seed(0)
+
+    def get_init_dict(self):
+        return {
             "all_patch_size": (2,),
             "all_f_patch_size": (1,),
             "in_channels": 16,
```
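The `_concat_list_output` helper introduced above exists because this model returns `sample` as a list of per-sample tensors rather than one batched tensor, so outputs cannot be compared directly. A minimal sketch of the comparison pattern, with `torch.testing.assert_close` standing in for the suite's `assert_tensors_close` (an assumption, not the real helper):

```python
import torch


def _concat_list_output(output):
    """Flatten each tensor in a list output and concatenate for comparison."""
    return torch.cat([t.flatten() for t in output])


# A list output with differently shaped entries, one tensor per sample.
out_a = [torch.ones(16, 1, 4, 4), torch.zeros(8)]
out_b = [torch.ones(16, 1, 4, 4), torch.zeros(8)]

# Comparing the concatenated flat vectors sidesteps the list structure.
torch.testing.assert_close(
    _concat_list_output(out_a), _concat_list_output(out_b), atol=1e-5, rtol=0
)
```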
The remaining hunk rewrites the test classes themselves:

```diff
@@ -89,83 +89,223 @@ class ZImageTransformerTests(ModelTesterMixin, unittest.TestCase):
             "axes_dims": [8, 4, 4],
             "axes_lens": [256, 32, 32],
         }
-        inputs_dict = self.dummy_input
-        return init_dict, inputs_dict
-
-    def setUp(self):
-        gc.collect()
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
-            torch.cuda.synchronize()
-
-    def tearDown(self):
-        super().tearDown()
-        gc.collect()
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
-            torch.cuda.synchronize()
-
-    def test_gradient_checkpointing_is_applied(self):
-        expected_set = {"ZImageTransformer2DModel"}
-        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
-
-    @unittest.skip("Test is not supported for handling main inputs that are lists.")
-    def test_training(self):
-        super().test_training()
-
-    @unittest.skip("Test is not supported for handling main inputs that are lists.")
-    def test_ema_training(self):
-        super().test_ema_training()
-
-    @unittest.skip("Test is not supported for handling main inputs that are lists.")
-    def test_effective_gradient_checkpointing(self):
-        super().test_effective_gradient_checkpointing()
-
-    @unittest.skip(
-        "Test needs to be revisited. But we need to ensure `x_pad_token` and `cap_pad_token` are cast to the same dtype as the destination tensor before they are assigned to the padding indices."
-    )
-    def test_layerwise_casting_training(self):
-        super().test_layerwise_casting_training()
-
-    @unittest.skip("Test is not supported for handling main inputs that are lists.")
-    def test_outputs_equivalence(self):
-        super().test_outputs_equivalence()
-
-    @unittest.skip("Test will pass if we change to deterministic values instead of empty in the DiT.")
-    def test_group_offloading(self):
-        super().test_group_offloading()
-
-    @unittest.skip("Test will pass if we change to deterministic values instead of empty in the DiT.")
-    def test_group_offloading_with_disk(self):
-        super().test_group_offloading_with_disk()
-
-
-class ZImageTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
-    model_class = ZImageTransformer2DModel
-    different_shapes_for_compilation = [(4, 4), (4, 8), (8, 8)]
-
-    def prepare_init_args_and_inputs_for_common(self):
-        return ZImageTransformerTests().prepare_init_args_and_inputs_for_common()
-
-    def prepare_dummy_input(self, height, width):
-        return ZImageTransformerTests().prepare_dummy_input(height=height, width=width)
-
-    @unittest.skip(
-        "The repeated block in this model is ZImageTransformerBlock, which is used for noise_refiner, context_refiner, and layers. As a consequence of this, the inputs recorded for the block would vary during compilation and full compilation with fullgraph=True would trigger recompilation at least thrice."
-    )
-    def test_torch_compile_recompilation_and_graph_break(self):
-        super().test_torch_compile_recompilation_and_graph_break()
-
-    @unittest.skip("Fullgraph AoT is broken")
-    def test_compile_works_with_aot(self):
-        super().test_compile_works_with_aot()
-
-    @unittest.skip("Fullgraph is broken")
-    def test_compile_on_different_shapes(self):
-        super().test_compile_on_different_shapes()
+    def get_dummy_inputs(self) -> dict[str, torch.Tensor | list]:
+        batch_size = 1
+        num_channels = 16
+        embedding_dim = 16
+        sequence_length = 16
+        height = 16
+        width = 16
+
+        hidden_states = [
+            randn_tensor((num_channels, 1, height, width), generator=self.generator, device=torch_device)
+            for _ in range(batch_size)
+        ]
+        encoder_hidden_states = [
+            randn_tensor((sequence_length, embedding_dim), generator=self.generator, device=torch_device)
+            for _ in range(batch_size)
+        ]
+        timestep = torch.tensor([0.0]).to(torch_device)
+
+        return {"x": hidden_states, "cap_feats": encoder_hidden_states, "t": timestep}
+
+
+class TestZImageTransformer(ZImageTransformerTesterConfig, ModelTesterMixin):
+    """Core model tests for Z-Image Transformer."""
+
+    @torch.no_grad()
+    def test_determinism(self, atol=1e-5, rtol=0):
+        model = self.model_class(**self.get_init_dict())
+        model.to(torch_device)
+        model.eval()
+
+        inputs_dict = self.get_dummy_inputs()
+        first = _concat_list_output(model(**inputs_dict, return_dict=False)[0])
+        second = _concat_list_output(model(**inputs_dict, return_dict=False)[0])
+
+        mask = ~(torch.isnan(first) | torch.isnan(second))
+        assert_tensors_close(
+            first[mask], second[mask], atol=atol, rtol=rtol, msg="Model outputs are not deterministic"
+        )
+
+    def test_from_save_pretrained(self, tmp_path, atol=5e-5, rtol=5e-5):
+        torch.manual_seed(0)
+        if torch.cuda.is_available():
+            torch.cuda.manual_seed_all(0)
+        model = self.model_class(**self.get_init_dict())
+        model.to(torch_device)
+        model.eval()
+
+        model.save_pretrained(tmp_path)
+        new_model = self.model_class.from_pretrained(tmp_path)
+        new_model.to(torch_device)
+
+        for param_name in model.state_dict().keys():
+            param_1 = model.state_dict()[param_name]
+            param_2 = new_model.state_dict()[param_name]
+            assert param_1.shape == param_2.shape
+
+        inputs_dict = self.get_dummy_inputs()
+        image = _concat_list_output(model(**inputs_dict, return_dict=False)[0])
+        new_image = _concat_list_output(new_model(**inputs_dict, return_dict=False)[0])
+
+        assert_tensors_close(image, new_image, atol=atol, rtol=rtol, msg="Models give different forward passes.")
+
+    @torch.no_grad()
+    def test_from_save_pretrained_variant(self, tmp_path, atol=5e-5, rtol=0):
+        model = self.model_class(**self.get_init_dict())
+        model.to(torch_device)
+        model.eval()
+
+        model.save_pretrained(tmp_path, variant="fp16")
+        new_model = self.model_class.from_pretrained(tmp_path, variant="fp16")
+
+        with pytest.raises(OSError) as exc_info:
+            self.model_class.from_pretrained(tmp_path)
+
+        assert "Error no file named diffusion_pytorch_model.bin found in directory" in str(exc_info.value)
+
+        new_model.to(torch_device)
+
+        inputs_dict = self.get_dummy_inputs()
+        image = _concat_list_output(model(**inputs_dict, return_dict=False)[0])
+        new_image = _concat_list_output(new_model(**inputs_dict, return_dict=False)[0])
+
+        assert_tensors_close(image, new_image, atol=atol, rtol=rtol, msg="Models give different forward passes.")
+
+    @pytest.mark.skip("Model output `sample` is a list of tensors, not a single tensor.")
+    def test_outputs_equivalence(self, atol=1e-5, rtol=0):
+        pass
+
+    def test_sharded_checkpoints_with_parallel_loading(self, tmp_path, atol=1e-5, rtol=0):
+        from diffusers.utils import SAFE_WEIGHTS_INDEX_NAME, constants
+
+        from ..testing_utils.common import calculate_expected_num_shards, compute_module_persistent_sizes
+
+        torch.manual_seed(0)
+        if torch.cuda.is_available():
+            torch.cuda.manual_seed_all(0)
+        config = self.get_init_dict()
+        inputs_dict = self.get_dummy_inputs()
+        model = self.model_class(**config).eval()
+        model = model.to(torch_device)
+
+        base_output = _concat_list_output(model(**inputs_dict, return_dict=False)[0])
+
+        model_size = compute_module_persistent_sizes(model)[""]
+        max_shard_size = int((model_size * 0.75) / (2**10))
+
+        original_parallel_loading = constants.HF_ENABLE_PARALLEL_LOADING
+        original_parallel_workers = getattr(constants, "HF_PARALLEL_WORKERS", None)
+
+        try:
+            model.cpu().save_pretrained(tmp_path, max_shard_size=f"{max_shard_size}KB")
+            assert os.path.exists(os.path.join(tmp_path, SAFE_WEIGHTS_INDEX_NAME))
+
+            expected_num_shards = calculate_expected_num_shards(os.path.join(tmp_path, SAFE_WEIGHTS_INDEX_NAME))
+            actual_num_shards = len([file for file in os.listdir(tmp_path) if file.endswith(".safetensors")])
+            assert actual_num_shards == expected_num_shards
+
+            constants.HF_ENABLE_PARALLEL_LOADING = False
+            self.model_class.from_pretrained(tmp_path).eval().to(torch_device)
+
+            constants.HF_ENABLE_PARALLEL_LOADING = True
+            constants.DEFAULT_HF_PARALLEL_LOADING_WORKERS = 2
+
+            torch.manual_seed(0)
+            model_parallel = self.model_class.from_pretrained(tmp_path).eval()
+            model_parallel = model_parallel.to(torch_device)
+
+            output_parallel = _concat_list_output(model_parallel(**inputs_dict, return_dict=False)[0])
+
+            assert_tensors_close(
+                base_output, output_parallel, atol=atol, rtol=rtol, msg="Output should match with parallel loading"
+            )
+        finally:
+            constants.HF_ENABLE_PARALLEL_LOADING = original_parallel_loading
+            if original_parallel_workers is not None:
+                constants.HF_PARALLEL_WORKERS = original_parallel_workers
+
+
+class TestZImageTransformerMemory(ZImageTransformerTesterConfig, MemoryTesterMixin):
+    """Memory optimization tests for Z-Image Transformer."""
+
+    @pytest.mark.skip(
+        "Ensure `x_pad_token` and `cap_pad_token` are cast to the same dtype as the destination tensor before they are assigned to the padding indices."
+    )
+    def test_layerwise_casting_training(self):
+        pass
+
+
+class TestZImageTransformerTraining(ZImageTransformerTesterConfig, TrainingTesterMixin):
+    """Training tests for Z-Image Transformer."""
+
+    def test_gradient_checkpointing_is_applied(self):
+        super().test_gradient_checkpointing_is_applied(expected_set={"ZImageTransformer2DModel"})
+
+    @pytest.mark.skip("Test is not supported for handling main inputs that are lists.")
+    def test_training(self):
+        pass
+
+    @pytest.mark.skip("Test is not supported for handling main inputs that are lists.")
+    def test_training_with_ema(self):
+        pass
+
+    @pytest.mark.skip("Test is not supported for handling main inputs that are lists.")
+    def test_gradient_checkpointing_equivalence(self, loss_tolerance=1e-5, param_grad_tol=5e-5, skip=None):
+        pass
+
+
+class TestZImageTransformerLoRA(ZImageTransformerTesterConfig, LoraTesterMixin):
+    """LoRA adapter tests for Z-Image Transformer."""
+
+    @pytest.mark.skip("Model output `sample` is a list of tensors, not a single tensor.")
+    def test_save_load_lora_adapter(self, tmp_path, rank=4, lora_alpha=4, use_dora=False, atol=1e-4, rtol=1e-4):
+        pass
+
+
+# TODO: Add pretrained_model_name_or_path once a tiny Z-Image model is available on the Hub
+# class TestZImageTransformerBitsAndBytes(ZImageTransformerTesterConfig, BitsAndBytesTesterMixin):
+#     """BitsAndBytes quantization tests for Z-Image Transformer."""
+
+
+# TODO: Add pretrained_model_name_or_path once a tiny Z-Image model is available on the Hub
+# class TestZImageTransformerTorchAo(ZImageTransformerTesterConfig, TorchAoTesterMixin):
+#     """TorchAo quantization tests for Z-Image Transformer."""
+
+
+class TestZImageTransformerCompile(ZImageTransformerTesterConfig, TorchCompileTesterMixin):
+    """Torch compile tests for Z-Image Transformer."""
+
+    @property
+    def different_shapes_for_compilation(self):
+        return [(4, 4), (4, 8), (8, 8)]
+
+    def get_dummy_inputs(self, height: int = 16, width: int = 16) -> dict[str, torch.Tensor | list]:
+        batch_size = 1
+        num_channels = 16
+        embedding_dim = 16
+        sequence_length = 16
+
+        hidden_states = [
+            randn_tensor((num_channels, 1, height, width), generator=self.generator, device=torch_device)
+            for _ in range(batch_size)
+        ]
+        encoder_hidden_states = [
+            randn_tensor((sequence_length, embedding_dim), generator=self.generator, device=torch_device)
+            for _ in range(batch_size)
+        ]
+        timestep = torch.tensor([0.0]).to(torch_device)
+
+        return {"x": hidden_states, "cap_feats": encoder_hidden_states, "t": timestep}
+
+    @pytest.mark.skip(
+        "The repeated block in this model is ZImageTransformerBlock, which is used for noise_refiner, context_refiner, and layers. The inputs recorded for the block would vary during compilation and full compilation with fullgraph=True would trigger recompilation at least thrice."
+    )
+    def test_torch_compile_recompilation_and_graph_break(self):
+        pass
+
+    @pytest.mark.skip("Fullgraph AoT is broken")
+    def test_compile_works_with_aot(self, tmp_path):
+        pass
+
+    @pytest.mark.skip("Fullgraph is broken")
+    def test_compile_on_different_shapes(self):
+        pass
```
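The parallel-loading test above toggles module-level flags and restores them in `finally` so later tests are unaffected. The skeleton of that pattern, reduced to its essentials (the flag names are taken from the diff; the loading step is elided):

```python
from diffusers.utils import constants

original = constants.HF_ENABLE_PARALLEL_LOADING
try:
    # Flip the flag for the duration of the test only.
    constants.HF_ENABLE_PARALLEL_LOADING = True
    # ... load the model and compare outputs here ...
finally:
    # Restore global state regardless of the test outcome.
    constants.HF_ENABLE_PARALLEL_LOADING = original
```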