Compare commits


1 Commit

Author  SHA1        Message  Date
DN6     e77bad6a16  update   2026-03-26 16:33:06 +05:30
3 changed files with 276 additions and 112 deletions

View File

@@ -13,23 +13,31 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Any
import torch
from diffusers import ChromaTransformer2DModel
from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0
from diffusers.models.embeddings import ImageProjection
from diffusers.utils.torch_utils import randn_tensor
from ...testing_utils import enable_full_determinism, torch_device
from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin
from ..testing_utils import (
BaseModelTesterConfig,
IPAdapterTesterMixin,
LoraHotSwappingForModelTesterMixin,
LoraTesterMixin,
ModelTesterMixin,
TorchCompileTesterMixin,
TrainingTesterMixin,
)
enable_full_determinism()
def create_chroma_ip_adapter_state_dict(model):
# "ip_adapter" (cross-attention weights)
def create_chroma_ip_adapter_state_dict(model) -> dict[str, dict[str, Any]]:
ip_cross_attn_state_dict = {}
key_id = 0
@@ -50,11 +58,8 @@ def create_chroma_ip_adapter_state_dict(model):
f"{key_id}.to_v_ip.bias": sd["to_v_ip.0.bias"],
}
)
key_id += 1
# "image_proj" (ImageProjection layer weights)
image_projection = ImageProjection(
cross_attention_dim=model.config["joint_attention_dim"],
image_embed_dim=model.config["pooled_projection_dim"],
@@ -73,53 +78,36 @@ def create_chroma_ip_adapter_state_dict(model):
)
del sd
ip_state_dict = {}
ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict})
return ip_state_dict
return {"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}
class ChromaTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = ChromaTransformer2DModel
main_input_name = "hidden_states"
# We override the items here because the transformer under consideration is small.
model_split_percents = [0.8, 0.7, 0.7]
# Skip the tests that set the default AttnProcessor, since this model uses a custom one
uses_custom_attn_processor = True
class ChromaTransformerTesterConfig(BaseModelTesterConfig):
@property
def model_class(self):
return ChromaTransformer2DModel
@property
def dummy_input(self):
batch_size = 1
num_latent_channels = 4
num_image_channels = 3
height = width = 4
sequence_length = 48
embedding_dim = 32
def main_input_name(self) -> str:
return "hidden_states"
hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
text_ids = torch.randn((sequence_length, num_image_channels)).to(torch_device)
image_ids = torch.randn((height * width, num_image_channels)).to(torch_device)
timestep = torch.tensor([1.0]).to(torch_device).expand(batch_size)
@property
def model_split_percents(self) -> list:
return [0.8, 0.7, 0.7]
@property
def output_shape(self) -> tuple:
return (16, 4)
@property
def input_shape(self) -> tuple:
return (16, 4)
@property
def generator(self):
return torch.Generator("cpu").manual_seed(0)
def get_init_dict(self) -> dict:
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"img_ids": image_ids,
"txt_ids": text_ids,
"timestep": timestep,
}
@property
def input_shape(self):
return (16, 4)
@property
def output_shape(self):
return (16, 4)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 1,
"in_channels": 4,
"num_layers": 1,
@@ -133,11 +121,35 @@ class ChromaTransformerTests(ModelTesterMixin, unittest.TestCase):
"approximator_layers": 1,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def get_dummy_inputs(self, batch_size: int = 1) -> dict[str, torch.Tensor]:
num_latent_channels = 4
num_image_channels = 3
height = width = 4
sequence_length = 48
embedding_dim = 32
return {
"hidden_states": randn_tensor(
(batch_size, height * width, num_latent_channels), generator=self.generator, device=torch_device
),
"encoder_hidden_states": randn_tensor(
(batch_size, sequence_length, embedding_dim), generator=self.generator, device=torch_device
),
"img_ids": randn_tensor(
(height * width, num_image_channels), generator=self.generator, device=torch_device
),
"txt_ids": randn_tensor(
(sequence_length, num_image_channels), generator=self.generator, device=torch_device
),
"timestep": torch.tensor([1.0]).to(torch_device).expand(batch_size),
}
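# Note: the img_ids/txt_ids built above are 2D (sequence_length, 3) position-id
# tensors, following the Flux convention Chroma builds on; the deprecated 3D
# variant with a leading batch dimension is exercised by
# test_deprecated_inputs_img_txt_ids_3d below.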
class TestChromaTransformer(ChromaTransformerTesterConfig, ModelTesterMixin):
def test_deprecated_inputs_img_txt_ids_3d(self):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
init_dict = self.get_init_dict()
inputs_dict = self.get_dummy_inputs()
model = self.model_class(**init_dict)
model.to(torch_device)
model.eval()
@@ -145,12 +157,11 @@ class ChromaTransformerTests(ModelTesterMixin, unittest.TestCase):
with torch.no_grad():
output_1 = model(**inputs_dict).to_tuple()[0]
# update inputs_dict with txt_ids and img_ids as 3d tensors (deprecated)
text_ids_3d = inputs_dict["txt_ids"].unsqueeze(0)
image_ids_3d = inputs_dict["img_ids"].unsqueeze(0)
assert text_ids_3d.ndim == 3, "text_ids_3d should be a 3d tensor"
assert image_ids_3d.ndim == 3, "img_ids_3d should be a 3d tensor"
assert text_ids_3d.ndim == 3
assert image_ids_3d.ndim == 3
inputs_dict["txt_ids"] = text_ids_3d
inputs_dict["img_ids"] = image_ids_3d
@@ -158,26 +169,59 @@ class ChromaTransformerTests(ModelTesterMixin, unittest.TestCase):
with torch.no_grad():
output_2 = model(**inputs_dict).to_tuple()[0]
self.assertEqual(output_1.shape, output_2.shape)
self.assertTrue(
torch.allclose(output_1, output_2, atol=1e-5),
msg="output with deprecated inputs (img_ids and txt_ids as 3d torch tensors) are not equal as them as 2d inputs",
assert output_1.shape == output_2.shape
assert torch.allclose(output_1, output_2, atol=1e-5), (
"output with deprecated inputs (img_ids and txt_ids as 3d torch tensors) "
"are not equal as them as 2d inputs"
)
class TestChromaTransformerTraining(ChromaTransformerTesterConfig, TrainingTesterMixin):
def test_gradient_checkpointing_is_applied(self):
expected_set = {"ChromaTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class ChromaTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
model_class = ChromaTransformer2DModel
def prepare_init_args_and_inputs_for_common(self):
return ChromaTransformerTests().prepare_init_args_and_inputs_for_common()
class TestChromaTransformerCompile(ChromaTransformerTesterConfig, TorchCompileTesterMixin):
pass
class ChromaTransformerLoRAHotSwapTests(LoraHotSwappingForModelTesterMixin, unittest.TestCase):
model_class = ChromaTransformer2DModel
class TestChromaTransformerIPAdapter(ChromaTransformerTesterConfig, IPAdapterTesterMixin):
@property
def ip_adapter_processor_cls(self):
return FluxIPAdapterJointAttnProcessor2_0
def prepare_init_args_and_inputs_for_common(self):
return ChromaTransformerTests().prepare_init_args_and_inputs_for_common()
def modify_inputs_for_ip_adapter(self, model, inputs_dict):
torch.manual_seed(0)
cross_attention_dim = getattr(model.config, "joint_attention_dim", 32)
image_embeds = torch.randn(1, 1, cross_attention_dim).to(torch_device)
inputs_dict.update({"joint_attention_kwargs": {"ip_adapter_image_embeds": image_embeds}})
return inputs_dict
def create_ip_adapter_state_dict(self, model: Any) -> dict[str, dict[str, Any]]:
return create_chroma_ip_adapter_state_dict(model)
class TestChromaTransformerLoRA(ChromaTransformerTesterConfig, LoraTesterMixin):
pass
class TestChromaTransformerLoRAHotSwap(ChromaTransformerTesterConfig, LoraHotSwappingForModelTesterMixin):
@property
def different_shapes_for_compilation(self):
return [(4, 4), (4, 8), (8, 8)]
def get_dummy_inputs(self, height: int = 4, width: int = 4) -> dict[str, torch.Tensor]:
batch_size = 1
num_latent_channels = 4
num_image_channels = 3
sequence_length = 24
embedding_dim = 32
return {
"hidden_states": randn_tensor((batch_size, height * width, num_latent_channels), device=torch_device),
"encoder_hidden_states": randn_tensor((batch_size, sequence_length, embedding_dim), device=torch_device),
"img_ids": randn_tensor((height * width, num_image_channels), device=torch_device),
"txt_ids": randn_tensor((sequence_length, num_image_channels), device=torch_device),
"timestep": torch.tensor([1.0]).to(torch_device).expand(batch_size),
}
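The pattern above replaces monolithic unittest.TestCase classes with a config object (model class, init args, dummy inputs) composed with focused test mixins. A minimal sketch of how that composition behaves under pytest, using hypothetical DemoConfig/DemoMixin names rather than the real BaseModelTesterConfig and mixins from ..testing_utils:

import torch

class DemoConfig:
    # Plays the role of a *TesterConfig: model setup only, no test methods.
    def get_init_dict(self) -> dict:
        return {"in_features": 4, "out_features": 4}

    def get_dummy_inputs(self) -> dict[str, torch.Tensor]:
        return {"input": torch.randn(1, 4)}

class DemoMixin:
    # Plays the role of ModelTesterMixin: tests written against the config API.
    def test_forward_shape(self):
        model = torch.nn.Linear(**self.get_init_dict())
        output = model(self.get_dummy_inputs()["input"])
        assert output.shape == (1, 4)

class TestDemo(DemoConfig, DemoMixin):
    # pytest collects DemoMixin's test_* methods with DemoConfig's setup.
    pass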

View File

@@ -13,61 +13,50 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import HiDreamImageTransformer2DModel
from diffusers.utils.torch_utils import randn_tensor
from ...testing_utils import (
enable_full_determinism,
torch_device,
from ...testing_utils import enable_full_determinism, torch_device
from ..testing_utils import (
BaseModelTesterConfig,
ModelTesterMixin,
TorchCompileTesterMixin,
TrainingTesterMixin,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class HiDreamTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = HiDreamImageTransformer2DModel
main_input_name = "hidden_states"
model_split_percents = [0.8, 0.8, 0.9]
class HiDreamTransformerTesterConfig(BaseModelTesterConfig):
@property
def model_class(self):
return HiDreamImageTransformer2DModel
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
height = width = 32
embedding_dim_t5, embedding_dim_llama, embedding_dim_pooled = 8, 4, 8
sequence_length = 8
def main_input_name(self) -> str:
return "hidden_states"
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states_t5 = torch.randn((batch_size, sequence_length, embedding_dim_t5)).to(torch_device)
encoder_hidden_states_llama3 = torch.randn((batch_size, batch_size, sequence_length, embedding_dim_llama)).to(
torch_device
)
pooled_embeds = torch.randn((batch_size, embedding_dim_pooled)).to(torch_device)
timesteps = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
@property
def model_split_percents(self) -> list:
return [0.8, 0.8, 0.9]
@property
def output_shape(self) -> tuple:
return (4, 32, 32)
@property
def input_shape(self) -> tuple:
return (4, 32, 32)
@property
def generator(self):
return torch.Generator("cpu").manual_seed(0)
def get_init_dict(self) -> dict:
return {
"hidden_states": hidden_states,
"encoder_hidden_states_t5": encoder_hidden_states_t5,
"encoder_hidden_states_llama3": encoder_hidden_states_llama3,
"pooled_embeds": pooled_embeds,
"timesteps": timesteps,
}
@property
def input_shape(self):
return (4, 32, 32)
@property
def output_shape(self):
return (4, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 2,
"in_channels": 4,
"out_channels": 4,
@@ -82,15 +71,43 @@ class HiDreamTransformerTests(ModelTesterMixin, unittest.TestCase):
"axes_dims_rope": (4, 2, 2),
"max_resolution": (32, 32),
"llama_layers": (0, 1),
"force_inference_output": True, # TODO: as we don't implement MoE loss in training tests.
"force_inference_output": True,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
@unittest.skip("HiDreamImageTransformer2DModel uses a dedicated attention processor. This test doesn't apply")
def test_set_attn_processor_for_determinism(self):
pass
def get_dummy_inputs(self, batch_size: int = 2) -> dict[str, torch.Tensor]:
num_channels = 4
height = width = 32
embedding_dim_t5, embedding_dim_llama, embedding_dim_pooled = 8, 4, 8
sequence_length = 8
return {
"hidden_states": randn_tensor(
(batch_size, num_channels, height, width), generator=self.generator, device=torch_device
),
"encoder_hidden_states_t5": randn_tensor(
(batch_size, sequence_length, embedding_dim_t5), generator=self.generator, device=torch_device
),
"encoder_hidden_states_llama3": randn_tensor(
(batch_size, batch_size, sequence_length, embedding_dim_llama),
generator=self.generator,
device=torch_device,
),
"pooled_embeds": randn_tensor(
(batch_size, embedding_dim_pooled), generator=self.generator, device=torch_device
),
"timesteps": torch.randint(0, 1000, size=(batch_size,), generator=self.generator).to(torch_device),
}
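# Note: encoder_hidden_states_llama3 is 4D because HiDream consumes hidden
# states stacked from several Llama layers (llama_layers=(0, 1) in
# get_init_dict); in this dummy input the layer count happens to equal
# batch_size, so two of the dimensions coincide.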
class TestHiDreamTransformer(HiDreamTransformerTesterConfig, ModelTesterMixin):
pass
class TestHiDreamTransformerTraining(HiDreamTransformerTesterConfig, TrainingTesterMixin):
def test_gradient_checkpointing_is_applied(self):
expected_set = {"HiDreamImageTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class TestHiDreamTransformerCompile(HiDreamTransformerTesterConfig, TorchCompileTesterMixin):
pass
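One detail worth noting in these configs: generator is a property that returns a freshly seeded torch.Generator on every access, so every randn_tensor call in get_dummy_inputs draws from seed 0 and the dummy inputs are reproducible across runs. A small sketch of that behavior:

import torch
from diffusers.utils.torch_utils import randn_tensor

# Two independently created generators with the same seed produce identical
# draws, which is what makes the configs' get_dummy_inputs deterministic.
gen_a = torch.Generator("cpu").manual_seed(0)
gen_b = torch.Generator("cpu").manual_seed(0)
a = randn_tensor((1, 4), generator=gen_a, device=torch.device("cpu"))
b = randn_tensor((1, 4), generator=gen_b, device=torch.device("cpu"))
assert torch.equal(a, b)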

View File

@@ -0,0 +1,103 @@
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from diffusers import LongCatImageTransformer2DModel
from diffusers.utils.torch_utils import randn_tensor
from ...testing_utils import enable_full_determinism, torch_device
from ..testing_utils import (
BaseModelTesterConfig,
ModelTesterMixin,
TorchCompileTesterMixin,
TrainingTesterMixin,
)
enable_full_determinism()
class LongCatImageTransformerTesterConfig(BaseModelTesterConfig):
@property
def model_class(self):
return LongCatImageTransformer2DModel
@property
def main_input_name(self) -> str:
return "hidden_states"
@property
def output_shape(self) -> tuple:
return (16, 4)
@property
def input_shape(self) -> tuple:
return (16, 4)
@property
def generator(self):
return torch.Generator("cpu").manual_seed(0)
def get_init_dict(self) -> dict:
return {
"patch_size": 1,
"in_channels": 4,
"num_layers": 1,
"num_single_layers": 1,
"attention_head_dim": 16,
"num_attention_heads": 2,
"joint_attention_dim": 32,
"pooled_projection_dim": 32,
"axes_dims_rope": [4, 4, 8],
}
def get_dummy_inputs(self, batch_size: int = 1) -> dict[str, torch.Tensor]:
num_latent_channels = 4
num_image_channels = 3
height = width = 4
sequence_length = 48
embedding_dim = 32
return {
"hidden_states": randn_tensor(
(batch_size, height * width, num_latent_channels), generator=self.generator, device=torch_device
),
"encoder_hidden_states": randn_tensor(
(batch_size, sequence_length, embedding_dim), generator=self.generator, device=torch_device
),
"img_ids": randn_tensor(
(height * width, num_image_channels), generator=self.generator, device=torch_device
),
"txt_ids": randn_tensor(
(sequence_length, num_image_channels), generator=self.generator, device=torch_device
),
"timestep": torch.tensor([1.0]).to(torch_device).expand(batch_size),
"guidance": torch.tensor([3.5]).to(torch_device).expand(batch_size),
}
class TestLongCatImageTransformer(LongCatImageTransformerTesterConfig, ModelTesterMixin):
pass
class TestLongCatImageTransformerTraining(LongCatImageTransformerTesterConfig, TrainingTesterMixin):
def test_gradient_checkpointing_is_applied(self):
expected_set = {"LongCatImageTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class TestLongCatImageTransformerCompile(LongCatImageTransformerTesterConfig, TorchCompileTesterMixin):
pass
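Unlike the Chroma config, get_dummy_inputs here also supplies a guidance tensor, so the mixins exercise the model's guidance-embedding path. A hedged smoke-run sketch, reusing the names defined in this file (the shape comment is an expectation, not a verified output):

with torch.no_grad():
    cfg = LongCatImageTransformerTesterConfig()
    model = cfg.model_class(**cfg.get_init_dict()).to(torch_device).eval()
    sample = model(**cfg.get_dummy_inputs()).to_tuple()[0]
# Expected: (1, 16, 4), i.e. batch, 4*4 latent tokens, latent channels,
# matching output_shape plus the batch dimension.
print(sample.shape)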