Mirror of https://github.com/huggingface/diffusers.git, synced 2026-04-09 17:27:07 +08:00
Compare commits
3 Commits
optimizati...sd3-test-r
| Author | SHA1 | Date |
|---|---|---|
| | 294a5f0d65 | |
| | 6ec4dee783 | |
| | 50015c966a | |
@@ -13,58 +13,63 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import unittest
 
 import torch
 
 from diffusers import SD3Transformer2DModel
-from diffusers.utils.import_utils import is_xformers_available
+from diffusers.utils.torch_utils import randn_tensor
 
-from ...testing_utils import (
-    enable_full_determinism,
-    torch_device,
+from ...testing_utils import enable_full_determinism, torch_device
+from ..testing_utils import (
+    BaseModelTesterConfig,
+    BitsAndBytesTesterMixin,
+    ModelTesterMixin,
+    TorchAoTesterMixin,
+    TorchCompileTesterMixin,
+    TrainingTesterMixin,
 )
-from ..test_modeling_common import ModelTesterMixin
 
 
 enable_full_determinism()
 
 
-class SD3TransformerTests(ModelTesterMixin, unittest.TestCase):
-    model_class = SD3Transformer2DModel
-    main_input_name = "hidden_states"
-    model_split_percents = [0.8, 0.8, 0.9]
+# ======================== SD3 Transformer ========================
+
+
+class SD3TransformerTesterConfig(BaseModelTesterConfig):
+    @property
+    def model_class(self):
+        return SD3Transformer2DModel
 
     @property
-    def dummy_input(self):
-        batch_size = 2
-        num_channels = 4
-        height = width = embedding_dim = 32
-        pooled_embedding_dim = embedding_dim * 2
-        sequence_length = 154
-
-        hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
-        encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
-        pooled_prompt_embeds = torch.randn((batch_size, pooled_embedding_dim)).to(torch_device)
-        timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
-
+    def pretrained_model_name_or_path(self):
+        return "hf-internal-testing/tiny-sd3-pipe"
+
+    @property
+    def pretrained_model_kwargs(self):
+        return {"subfolder": "transformer"}
+
+    @property
+    def main_input_name(self) -> str:
+        return "hidden_states"
+
+    @property
+    def model_split_percents(self) -> list:
+        return [0.8, 0.8, 0.9]
+
+    @property
+    def output_shape(self) -> tuple:
+        return (4, 32, 32)
+
+    @property
+    def input_shape(self) -> tuple:
+        return (4, 32, 32)
+
+    @property
+    def generator(self):
+        return torch.Generator("cpu").manual_seed(0)
+
+    def get_init_dict(self) -> dict:
         return {
-            "hidden_states": hidden_states,
-            "encoder_hidden_states": encoder_hidden_states,
-            "pooled_projections": pooled_prompt_embeds,
-            "timestep": timestep,
-        }
-
-    @property
-    def input_shape(self):
-        return (4, 32, 32)
-
-    @property
-    def output_shape(self):
-        return (4, 32, 32)
-
-    def prepare_init_args_and_inputs_for_common(self):
-        init_dict = {
             "sample_size": 32,
             "patch_size": 1,
             "in_channels": 4,
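The hunk above replaces the monolithic `SD3TransformerTests(ModelTesterMixin, unittest.TestCase)` class with a declarative `SD3TransformerTesterConfig` that only describes the model under test; reusable behaviors arrive later through mixins. A minimal sketch of that composition pattern follows — all names here are illustrative stand-ins, not the real diffusers test utilities:

```python
# Sketch of the config/mixin split this diff introduces. DemoTesterConfig,
# DemoForwardTesterMixin and TestDemoModel are hypothetical stand-ins.
import torch


class DemoTesterConfig:
    """Describes *what* to test: model class, init kwargs, dummy inputs."""

    @property
    def model_class(self):
        return torch.nn.Linear  # stand-in for SD3Transformer2DModel

    def get_init_dict(self) -> dict:
        return {"in_features": 4, "out_features": 4}

    def get_dummy_inputs(self) -> dict:
        return {"input": torch.randn(2, 4)}


class DemoForwardTesterMixin:
    """Describes *how* to test: behavior shared across every model config."""

    def test_forward(self):
        model = self.model_class(**self.get_init_dict())
        output = model(self.get_dummy_inputs()["input"])
        assert output.shape == (2, 4)


# One small class per concern; pytest collects it because of the Test* name.
class TestDemoModel(DemoTesterConfig, DemoForwardTesterMixin):
    pass


TestDemoModel().test_forward()  # also runs standalone
```

Each concern (forward pass, training, compile, quantization) then becomes its own `Test*` class that inherits the same config, which is exactly what the later hunks do with `TrainingTesterMixin`, `TorchCompileTesterMixin`, `BitsAndBytesTesterMixin` and `TorchAoTesterMixin`.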
@@ -79,67 +84,79 @@ class SD3TransformerTests(ModelTesterMixin, unittest.TestCase):
             "dual_attention_layers": (),
             "qk_norm": None,
         }
-        inputs_dict = self.dummy_input
-        return init_dict, inputs_dict
 
-    @unittest.skipIf(
-        torch_device != "cuda" or not is_xformers_available(),
-        reason="XFormers attention is only available with CUDA and `xformers` installed",
-    )
-    def test_xformers_enable_works(self):
-        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-        model = self.model_class(**init_dict)
-
-        model.enable_xformers_memory_efficient_attention()
-
-        assert model.transformer_blocks[0].attn.processor.__class__.__name__ == "XFormersJointAttnProcessor", (
-            "xformers is not enabled"
-        )
-
-    @unittest.skip("SD3Transformer2DModel uses a dedicated attention processor. This test doesn't apply")
-    def test_set_attn_processor_for_determinism(self):
-        pass
-
-    def test_gradient_checkpointing_is_applied(self):
-        expected_set = {"SD3Transformer2DModel"}
-        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
-
-
-class SD35TransformerTests(ModelTesterMixin, unittest.TestCase):
-    model_class = SD3Transformer2DModel
-    main_input_name = "hidden_states"
-    model_split_percents = [0.8, 0.8, 0.9]
-
-    @property
-    def dummy_input(self):
-        batch_size = 2
+    def get_dummy_inputs(self, batch_size: int = 2) -> dict[str, torch.Tensor]:
         num_channels = 4
         height = width = embedding_dim = 32
         pooled_embedding_dim = embedding_dim * 2
         sequence_length = 154
 
-        hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
-        encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
-        pooled_prompt_embeds = torch.randn((batch_size, pooled_embedding_dim)).to(torch_device)
-        timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
-
         return {
-            "hidden_states": hidden_states,
-            "encoder_hidden_states": encoder_hidden_states,
-            "pooled_projections": pooled_prompt_embeds,
-            "timestep": timestep,
+            "hidden_states": randn_tensor(
+                (batch_size, num_channels, height, width), generator=self.generator, device=torch_device
+            ),
+            "encoder_hidden_states": randn_tensor(
+                (batch_size, sequence_length, embedding_dim), generator=self.generator, device=torch_device
+            ),
+            "pooled_projections": randn_tensor(
+                (batch_size, pooled_embedding_dim), generator=self.generator, device=torch_device
+            ),
+            "timestep": torch.randint(0, 1000, size=(batch_size,), generator=self.generator).to(torch_device),
         }
 
 
+class TestSD3Transformer(SD3TransformerTesterConfig, ModelTesterMixin):
+    pass
+
+
+class TestSD3TransformerTraining(SD3TransformerTesterConfig, TrainingTesterMixin):
+    def test_gradient_checkpointing_is_applied(self):
+        expected_set = {"SD3Transformer2DModel"}
+        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
+
+
+class TestSD3TransformerCompile(SD3TransformerTesterConfig, TorchCompileTesterMixin):
+    pass
+
+
+# ======================== SD3.5 Transformer ========================
+
+
+class SD35TransformerTesterConfig(BaseModelTesterConfig):
     @property
-    def input_shape(self):
+    def model_class(self):
+        return SD3Transformer2DModel
+
+    @property
+    def pretrained_model_name_or_path(self):
+        return "hf-internal-testing/tiny-sd35-pipe"
+
+    @property
+    def pretrained_model_kwargs(self):
+        return {"subfolder": "transformer"}
+
+    @property
+    def main_input_name(self) -> str:
+        return "hidden_states"
+
+    @property
+    def model_split_percents(self) -> list:
+        return [0.8, 0.8, 0.9]
+
+    @property
+    def output_shape(self) -> tuple:
         return (4, 32, 32)
 
     @property
-    def output_shape(self):
+    def input_shape(self) -> tuple:
         return (4, 32, 32)
 
-    def prepare_init_args_and_inputs_for_common(self):
-        init_dict = {
+    @property
+    def generator(self):
+        return torch.Generator("cpu").manual_seed(0)
+
+    def get_init_dict(self) -> dict:
+        return {
             "sample_size": 32,
             "patch_size": 1,
             "in_channels": 4,
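Both configs now build dummy inputs through `randn_tensor` with a CPU generator seeded to 0, instead of unseeded `torch.randn(...).to(torch_device)`, so every test run sees bit-identical inputs. A small sketch of why that matters, assuming only that `diffusers` and `torch` are installed:

```python
# randn_tensor is diffusers' device-aware wrapper around torch.randn; with
# identically seeded generators it returns identical tensors, making test
# inputs reproducible across runs (device defaults to CPU when omitted).
import torch

from diffusers.utils.torch_utils import randn_tensor

shape = (2, 4, 32, 32)  # (batch, channels, height, width), as in get_dummy_inputs

a = randn_tensor(shape, generator=torch.Generator("cpu").manual_seed(0))
b = randn_tensor(shape, generator=torch.Generator("cpu").manual_seed(0))
assert torch.equal(a, b)  # same seed -> same dummy inputs on every run
```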
@@ -154,47 +171,56 @@ class SD35TransformerTests(ModelTesterMixin, unittest.TestCase):
             "dual_attention_layers": (0,),
             "qk_norm": "rms_norm",
         }
-        inputs_dict = self.dummy_input
-        return init_dict, inputs_dict
 
-    @unittest.skipIf(
-        torch_device != "cuda" or not is_xformers_available(),
-        reason="XFormers attention is only available with CUDA and `xformers` installed",
-    )
-    def test_xformers_enable_works(self):
-        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-        model = self.model_class(**init_dict)
+    def get_dummy_inputs(self, batch_size: int = 2) -> dict[str, torch.Tensor]:
+        num_channels = 4
+        height = width = embedding_dim = 32
+        pooled_embedding_dim = embedding_dim * 2
+        sequence_length = 154
 
-        model.enable_xformers_memory_efficient_attention()
-
-        assert model.transformer_blocks[0].attn.processor.__class__.__name__ == "XFormersJointAttnProcessor", (
-            "xformers is not enabled"
-        )
-
-    @unittest.skip("SD3Transformer2DModel uses a dedicated attention processor. This test doesn't apply")
-    def test_set_attn_processor_for_determinism(self):
-        pass
-
-    def test_gradient_checkpointing_is_applied(self):
-        expected_set = {"SD3Transformer2DModel"}
-        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
+        return {
+            "hidden_states": randn_tensor(
+                (batch_size, num_channels, height, width), generator=self.generator, device=torch_device
+            ),
+            "encoder_hidden_states": randn_tensor(
+                (batch_size, sequence_length, embedding_dim), generator=self.generator, device=torch_device
+            ),
+            "pooled_projections": randn_tensor(
+                (batch_size, pooled_embedding_dim), generator=self.generator, device=torch_device
+            ),
+            "timestep": torch.randint(0, 1000, size=(batch_size,), generator=self.generator).to(torch_device),
+        }
 
+
+class TestSD35Transformer(SD35TransformerTesterConfig, ModelTesterMixin):
     def test_skip_layers(self):
-        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+        init_dict = self.get_init_dict()
+        inputs_dict = self.get_dummy_inputs()
         model = self.model_class(**init_dict).to(torch_device)
 
-        # Forward pass without skipping layers
         output_full = model(**inputs_dict).sample
 
-        # Forward pass with skipping layers 0 (since there's only one layer in this test setup)
         inputs_dict_with_skip = inputs_dict.copy()
         inputs_dict_with_skip["skip_layers"] = [0]
         output_skip = model(**inputs_dict_with_skip).sample
 
-        # Check that the outputs are different
-        self.assertFalse(
-            torch.allclose(output_full, output_skip, atol=1e-5), "Outputs should differ when layers are skipped"
-        )
-
-        # Check that the outputs have the same shape
-        self.assertEqual(output_full.shape, output_skip.shape, "Outputs should have the same shape")
+        assert not torch.allclose(output_full, output_skip, atol=1e-5), "Outputs should differ when layers are skipped"
+        assert output_full.shape == output_skip.shape, "Outputs should have the same shape"
+
+
+class TestSD35TransformerTraining(SD35TransformerTesterConfig, TrainingTesterMixin):
+    def test_gradient_checkpointing_is_applied(self):
+        expected_set = {"SD3Transformer2DModel"}
+        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
+
+
+class TestSD35TransformerCompile(SD35TransformerTesterConfig, TorchCompileTesterMixin):
+    pass
+
+
+class TestSD35TransformerBitsAndBytes(SD35TransformerTesterConfig, BitsAndBytesTesterMixin):
+    """BitsAndBytes quantization tests for SD3.5 Transformer."""
+
+
+class TestSD35TransformerTorchAo(SD35TransformerTesterConfig, TorchAoTesterMixin):
+    """TorchAO quantization tests for SD3.5 Transformer."""
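`test_skip_layers` keeps its original logic — forward with and without `skip_layers=[0]`, then compare — only rephrased in terms of `get_init_dict`/`get_dummy_inputs` and plain asserts. A standalone sketch of the same check, reusing the config class from this diff and assuming the refactored test module is importable:

```python
# Hedged sketch: manual version of test_skip_layers. Assumes
# SD35TransformerTesterConfig and torch_device from this diff are importable.
import torch

config = SD35TransformerTesterConfig()
model = config.model_class(**config.get_init_dict()).to(torch_device)
model.eval()

inputs = config.get_dummy_inputs()
with torch.no_grad():
    full = model(**inputs).sample                      # all transformer blocks run
    skipped = model(**inputs, skip_layers=[0]).sample  # block 0 bypassed

assert full.shape == skipped.shape, "skipping layers must not change output shape"
assert not torch.allclose(full, skipped, atol=1e-5), "skipped pass should differ"
```

This is the mechanism skip-layer guidance relies on: bypassing a block changes the activations but not the output geometry.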