Compare commits

..

1 Commit

Author SHA1 Message Date
sayakpaul
5b0c7456f3 move test_hooks.py to pytest 2026-03-10 12:03:03 +05:30
5 changed files with 245 additions and 211 deletions

View File

@@ -36,7 +36,7 @@ from typing import Any, Callable
from packaging import version
from ..utils import deprecate, is_torch_available, is_torchao_available, is_torchao_version, logging
from ..utils import is_torch_available, is_torchao_available, is_torchao_version, logging
if is_torch_available():
@@ -844,8 +844,6 @@ class QuantoConfig(QuantizationConfigMixin):
modules_to_not_convert: list[str] | None = None,
**kwargs,
):
deprecation_message = "`QuantoConfig` is deprecated and will be removed in version 1.0.0."
deprecate("QuantoConfig", "1.0.0", deprecation_message)
self.quant_method = QuantizationMethod.QUANTO
self.weights_dtype = weights_dtype
self.modules_to_not_convert = modules_to_not_convert

View File

@@ -3,7 +3,6 @@ from typing import TYPE_CHECKING, Any
from diffusers.utils.import_utils import is_optimum_quanto_version
from ...utils import (
deprecate,
get_module_from_name,
is_accelerate_available,
is_accelerate_version,
@@ -43,9 +42,6 @@ class QuantoQuantizer(DiffusersQuantizer):
super().__init__(quantization_config, **kwargs)
def validate_environment(self, *args, **kwargs):
deprecation_message = "The Quanto quantizer is deprecated and will be removed in version 1.0.0."
deprecate("QuantoQuantizer", "1.0.0", deprecation_message)
if not is_optimum_quanto_available():
raise ImportError(
"Loading an optimum-quanto quantized model requires optimum-quanto library (`pip install optimum-quanto`)"

View File

@@ -13,8 +13,8 @@
# limitations under the License.
import gc
import unittest
import pytest
import torch
from diffusers.hooks import HookRegistry, ModelHook
@@ -134,20 +134,18 @@ class SkipLayerHook(ModelHook):
return output
class HookTests(unittest.TestCase):
class TestHooks:
in_features = 4
hidden_features = 8
out_features = 4
num_layers = 2
def setUp(self):
def setup_method(self):
params = self.get_module_parameters()
self.model = DummyModel(**params)
self.model.to(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
del self.model
gc.collect()
free_memory()
@@ -171,20 +169,20 @@ class HookTests(unittest.TestCase):
registry_repr = repr(registry)
expected_repr = "HookRegistry(\n (0) add_hook - AddHook\n (1) multiply_hook - MultiplyHook(value=2)\n)"
self.assertEqual(len(registry.hooks), 2)
self.assertEqual(registry._hook_order, ["add_hook", "multiply_hook"])
self.assertEqual(registry_repr, expected_repr)
assert len(registry.hooks) == 2
assert registry._hook_order == ["add_hook", "multiply_hook"]
assert registry_repr == expected_repr
registry.remove_hook("add_hook")
self.assertEqual(len(registry.hooks), 1)
self.assertEqual(registry._hook_order, ["multiply_hook"])
assert len(registry.hooks) == 1
assert registry._hook_order == ["multiply_hook"]
def test_stateful_hook(self):
registry = HookRegistry.check_if_exists_or_initialize(self.model)
registry.register_hook(StatefulAddHook(1), "stateful_add_hook")
self.assertEqual(registry.hooks["stateful_add_hook"].increment, 0)
assert registry.hooks["stateful_add_hook"].increment == 0
input = torch.randn(1, 4, device=torch_device, generator=self.get_generator())
num_repeats = 3
@@ -194,13 +192,13 @@ class HookTests(unittest.TestCase):
if i == 0:
output1 = result
self.assertEqual(registry.get_hook("stateful_add_hook").increment, num_repeats)
assert registry.get_hook("stateful_add_hook").increment == num_repeats
registry.reset_stateful_hooks()
output2 = self.model(input)
self.assertEqual(registry.get_hook("stateful_add_hook").increment, 1)
self.assertTrue(torch.allclose(output1, output2))
assert registry.get_hook("stateful_add_hook").increment == 1
assert torch.allclose(output1, output2)
def test_inference(self):
registry = HookRegistry.check_if_exists_or_initialize(self.model)
@@ -218,9 +216,9 @@ class HookTests(unittest.TestCase):
new_input = input * 2 + 1
output3 = self.model(new_input).mean().detach().cpu().item()
self.assertAlmostEqual(output1, output2, places=5)
self.assertAlmostEqual(output1, output3, places=5)
self.assertAlmostEqual(output2, output3, places=5)
assert output1 == pytest.approx(output2, abs=5e-6)
assert output1 == pytest.approx(output3, abs=5e-6)
assert output2 == pytest.approx(output3, abs=5e-6)
def test_skip_layer_hook(self):
registry = HookRegistry.check_if_exists_or_initialize(self.model)
@@ -228,30 +226,29 @@ class HookTests(unittest.TestCase):
input = torch.zeros(1, 4, device=torch_device)
output = self.model(input).mean().detach().cpu().item()
self.assertEqual(output, 0.0)
assert output == 0.0
registry.remove_hook("skip_layer_hook")
registry.register_hook(SkipLayerHook(skip_layer=False), "skip_layer_hook")
output = self.model(input).mean().detach().cpu().item()
self.assertNotEqual(output, 0.0)
assert output != 0.0
def test_skip_layer_internal_block(self):
registry = HookRegistry.check_if_exists_or_initialize(self.model.linear_1)
input = torch.zeros(1, 4, device=torch_device)
registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook")
with self.assertRaises(RuntimeError) as cm:
with pytest.raises(RuntimeError, match="mat1 and mat2 shapes cannot be multiplied"):
self.model(input).mean().detach().cpu().item()
self.assertIn("mat1 and mat2 shapes cannot be multiplied", str(cm.exception))
registry.remove_hook("skip_layer_hook")
output = self.model(input).mean().detach().cpu().item()
self.assertNotEqual(output, 0.0)
assert output != 0.0
registry = HookRegistry.check_if_exists_or_initialize(self.model.blocks[1])
registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook")
output = self.model(input).mean().detach().cpu().item()
self.assertNotEqual(output, 0.0)
assert output != 0.0
def test_invocation_order_stateful_first(self):
registry = HookRegistry.check_if_exists_or_initialize(self.model)
@@ -278,7 +275,7 @@ class HookTests(unittest.TestCase):
.replace(" ", "")
.replace("\n", "")
)
self.assertEqual(output, expected_invocation_order_log)
assert output == expected_invocation_order_log
registry.remove_hook("add_hook")
with CaptureLogger(logger) as cap_logger:
@@ -289,7 +286,7 @@ class HookTests(unittest.TestCase):
.replace(" ", "")
.replace("\n", "")
)
self.assertEqual(output, expected_invocation_order_log)
assert output == expected_invocation_order_log
def test_invocation_order_stateful_middle(self):
registry = HookRegistry.check_if_exists_or_initialize(self.model)
@@ -316,7 +313,7 @@ class HookTests(unittest.TestCase):
.replace(" ", "")
.replace("\n", "")
)
self.assertEqual(output, expected_invocation_order_log)
assert output == expected_invocation_order_log
registry.remove_hook("add_hook")
with CaptureLogger(logger) as cap_logger:
@@ -327,7 +324,7 @@ class HookTests(unittest.TestCase):
.replace(" ", "")
.replace("\n", "")
)
self.assertEqual(output, expected_invocation_order_log)
assert output == expected_invocation_order_log
registry.remove_hook("add_hook_2")
with CaptureLogger(logger) as cap_logger:
@@ -336,7 +333,7 @@ class HookTests(unittest.TestCase):
expected_invocation_order_log = (
("MultiplyHook pre_forward\nMultiplyHook post_forward\n").replace(" ", "").replace("\n", "")
)
self.assertEqual(output, expected_invocation_order_log)
assert output == expected_invocation_order_log
def test_invocation_order_stateful_last(self):
registry = HookRegistry.check_if_exists_or_initialize(self.model)
@@ -363,7 +360,7 @@ class HookTests(unittest.TestCase):
.replace(" ", "")
.replace("\n", "")
)
self.assertEqual(output, expected_invocation_order_log)
assert output == expected_invocation_order_log
registry.remove_hook("add_hook")
with CaptureLogger(logger) as cap_logger:
@@ -374,4 +371,4 @@ class HookTests(unittest.TestCase):
.replace(" ", "")
.replace("\n", "")
)
self.assertEqual(output, expected_invocation_order_log)
assert output == expected_invocation_order_log

View File

@@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,47 +13,59 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import LTXVideoTransformer3DModel
from diffusers.utils.torch_utils import randn_tensor
from ...testing_utils import enable_full_determinism, torch_device
from ..testing_utils import (
BaseModelTesterConfig,
MemoryTesterMixin,
ModelTesterMixin,
TorchCompileTesterMixin,
TrainingTesterMixin,
)
from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
enable_full_determinism()
class LTXTransformerTesterConfig(BaseModelTesterConfig):
@property
def model_class(self):
return LTXVideoTransformer3DModel
class LTXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = LTXVideoTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def output_shape(self) -> tuple[int, int]:
return (512, 4)
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 16
width = 16
embedding_dim = 16
sequence_length = 16
@property
def input_shape(self) -> tuple[int, int]:
return (512, 4)
hidden_states = torch.randn((batch_size, num_frames * height * width, num_channels)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
@property
def main_input_name(self) -> str:
return "hidden_states"
@property
def generator(self):
return torch.Generator("cpu").manual_seed(0)
def get_init_dict(self):
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"encoder_attention_mask": encoder_attention_mask,
"num_frames": num_frames,
"height": height,
"width": width,
}
@property
def input_shape(self):
return (512, 4)
@property
def output_shape(self):
return (512, 4)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"in_channels": 4,
"out_channels": 4,
"num_attention_heads": 2,
@@ -62,57 +75,16 @@ class LTXTransformerTesterConfig(BaseModelTesterConfig):
"qk_norm": "rms_norm_across_heads",
"caption_channels": 16,
}
def get_dummy_inputs(self) -> dict[str, torch.Tensor]:
batch_size = 2
num_channels = 4
num_frames = 2
height = 16
width = 16
embedding_dim = 16
sequence_length = 16
return {
"hidden_states": randn_tensor(
(batch_size, num_frames * height * width, num_channels),
generator=self.generator,
device=torch_device,
),
"encoder_hidden_states": randn_tensor(
(batch_size, sequence_length, embedding_dim), generator=self.generator, device=torch_device
),
"timestep": torch.randint(0, 1000, size=(batch_size,), generator=self.generator).to(torch_device),
"encoder_attention_mask": torch.ones((batch_size, sequence_length)).bool().to(torch_device),
"num_frames": num_frames,
"height": height,
"width": width,
}
class TestLTXTransformer(LTXTransformerTesterConfig, ModelTesterMixin):
"""Core model tests for LTX Video Transformer."""
class TestLTXTransformerMemory(LTXTransformerTesterConfig, MemoryTesterMixin):
"""Memory optimization tests for LTX Video Transformer."""
class TestLTXTransformerTraining(LTXTransformerTesterConfig, TrainingTesterMixin):
"""Training tests for LTX Video Transformer."""
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
super().test_gradient_checkpointing_is_applied(expected_set={"LTXVideoTransformer3DModel"})
expected_set = {"LTXVideoTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class TestLTXTransformerCompile(LTXTransformerTesterConfig, TorchCompileTesterMixin):
"""Torch compile tests for LTX Video Transformer."""
class LTXTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
model_class = LTXVideoTransformer3DModel
# TODO: Add pretrained_model_name_or_path once a tiny LTX model is available on the Hub
# class TestLTXTransformerBitsAndBytes(LTXTransformerTesterConfig, BitsAndBytesTesterMixin):
# """BitsAndBytes quantization tests for LTX Video Transformer."""
# TODO: Add pretrained_model_name_or_path once a tiny LTX model is available on the Hub
# class TestLTXTransformerTorchAo(LTXTransformerTesterConfig, TorchAoTesterMixin):
# """TorchAo quantization tests for LTX Video Transformer."""
def prepare_init_args_and_inputs_for_common(self):
return LTXTransformerTests().prepare_init_args_and_inputs_for_common()

View File

@@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,49 +13,77 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import unittest
import torch
from diffusers import LTX2VideoTransformer3DModel
from diffusers.utils.torch_utils import randn_tensor
from ...testing_utils import enable_full_determinism, torch_device
from ..testing_utils import (
AttentionTesterMixin,
BaseModelTesterConfig,
MemoryTesterMixin,
ModelTesterMixin,
TorchCompileTesterMixin,
TrainingTesterMixin,
)
from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
enable_full_determinism()
class LTX2TransformerTesterConfig(BaseModelTesterConfig):
@property
def model_class(self):
return LTX2VideoTransformer3DModel
class LTX2TransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = LTX2VideoTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def output_shape(self) -> tuple[int, int]:
return (512, 4)
def dummy_input(self):
# Common
batch_size = 2
@property
def input_shape(self) -> tuple[int, int]:
return (512, 4)
# Video
num_frames = 2
num_channels = 4
height = 16
width = 16
@property
def main_input_name(self) -> str:
return "hidden_states"
# Audio
audio_num_frames = 9
audio_num_channels = 2
num_mel_bins = 2
@property
def generator(self):
return torch.Generator("cpu").manual_seed(0)
# Text
embedding_dim = 16
sequence_length = 16
hidden_states = torch.randn((batch_size, num_frames * height * width, num_channels)).to(torch_device)
audio_hidden_states = torch.randn((batch_size, audio_num_frames, audio_num_channels * num_mel_bins)).to(
torch_device
)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
audio_encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device)
timestep = torch.rand((batch_size,)).to(torch_device) * 1000
def get_init_dict(self):
return {
"hidden_states": hidden_states,
"audio_hidden_states": audio_hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"audio_encoder_hidden_states": audio_encoder_hidden_states,
"timestep": timestep,
"encoder_attention_mask": encoder_attention_mask,
"num_frames": num_frames,
"height": height,
"width": width,
"audio_num_frames": audio_num_frames,
"fps": 25.0,
}
@property
def input_shape(self):
return (512, 4)
@property
def output_shape(self):
return (512, 4)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"in_channels": 4,
"out_channels": 4,
"patch_size": 1,
@@ -72,80 +101,122 @@ class LTX2TransformerTesterConfig(BaseModelTesterConfig):
"caption_channels": 16,
"rope_double_precision": False,
}
def get_dummy_inputs(self) -> dict[str, torch.Tensor]:
batch_size = 2
num_frames = 2
num_channels = 4
height = 16
width = 16
audio_num_frames = 9
audio_num_channels = 2
num_mel_bins = 2
embedding_dim = 16
sequence_length = 16
return {
"hidden_states": randn_tensor(
(batch_size, num_frames * height * width, num_channels),
generator=self.generator,
device=torch_device,
),
"audio_hidden_states": randn_tensor(
(batch_size, audio_num_frames, audio_num_channels * num_mel_bins),
generator=self.generator,
device=torch_device,
),
"encoder_hidden_states": randn_tensor(
(batch_size, sequence_length, embedding_dim), generator=self.generator, device=torch_device
),
"audio_encoder_hidden_states": randn_tensor(
(batch_size, sequence_length, embedding_dim), generator=self.generator, device=torch_device
),
"timestep": (randn_tensor((batch_size,), generator=self.generator, device=torch_device).abs() * 1000),
"encoder_attention_mask": torch.ones((batch_size, sequence_length)).bool().to(torch_device),
"num_frames": num_frames,
"height": height,
"width": width,
"audio_num_frames": audio_num_frames,
"fps": 25.0,
}
class TestLTX2Transformer(LTX2TransformerTesterConfig, ModelTesterMixin):
"""Core model tests for LTX2 Video Transformer."""
class TestLTX2TransformerMemory(LTX2TransformerTesterConfig, MemoryTesterMixin):
"""Memory optimization tests for LTX2 Video Transformer."""
class TestLTX2TransformerTraining(LTX2TransformerTesterConfig, TrainingTesterMixin):
"""Training tests for LTX2 Video Transformer."""
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
super().test_gradient_checkpointing_is_applied(expected_set={"LTX2VideoTransformer3DModel"})
expected_set = {"LTX2VideoTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
# def test_ltx2_consistency(self, seed=0, dtype=torch.float32):
# torch.manual_seed(seed)
# init_dict, _ = self.prepare_init_args_and_inputs_for_common()
# # Calculate dummy inputs in a custom manner to ensure compatibility with original code
# batch_size = 2
# num_frames = 9
# latent_frames = 2
# text_embedding_dim = 16
# text_seq_len = 16
# fps = 25.0
# sampling_rate = 16000.0
# hop_length = 160.0
# sigma = torch.rand((1,), generator=torch.manual_seed(seed), dtype=dtype, device="cpu") * 1000
# timestep = (sigma * torch.ones((batch_size,), dtype=dtype, device="cpu")).to(device=torch_device)
# num_channels = 4
# latent_height = 4
# latent_width = 4
# hidden_states = torch.randn(
# (batch_size, num_channels, latent_frames, latent_height, latent_width),
# generator=torch.manual_seed(seed),
# dtype=dtype,
# device="cpu",
# )
# # Patchify video latents (with patch_size (1, 1, 1))
# hidden_states = hidden_states.reshape(batch_size, -1, latent_frames, 1, latent_height, 1, latent_width, 1)
# hidden_states = hidden_states.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7).flatten(1, 3)
# encoder_hidden_states = torch.randn(
# (batch_size, text_seq_len, text_embedding_dim),
# generator=torch.manual_seed(seed),
# dtype=dtype,
# device="cpu",
# )
# audio_num_channels = 2
# num_mel_bins = 2
# latent_length = int((sampling_rate / hop_length / 4) * (num_frames / fps))
# audio_hidden_states = torch.randn(
# (batch_size, audio_num_channels, latent_length, num_mel_bins),
# generator=torch.manual_seed(seed),
# dtype=dtype,
# device="cpu",
# )
# # Patchify audio latents
# audio_hidden_states = audio_hidden_states.transpose(1, 2).flatten(2, 3)
# audio_encoder_hidden_states = torch.randn(
# (batch_size, text_seq_len, text_embedding_dim),
# generator=torch.manual_seed(seed),
# dtype=dtype,
# device="cpu",
# )
# inputs_dict = {
# "hidden_states": hidden_states.to(device=torch_device),
# "audio_hidden_states": audio_hidden_states.to(device=torch_device),
# "encoder_hidden_states": encoder_hidden_states.to(device=torch_device),
# "audio_encoder_hidden_states": audio_encoder_hidden_states.to(device=torch_device),
# "timestep": timestep,
# "num_frames": latent_frames,
# "height": latent_height,
# "width": latent_width,
# "audio_num_frames": num_frames,
# "fps": 25.0,
# }
# model = self.model_class.from_pretrained(
# "diffusers-internal-dev/dummy-ltx2",
# subfolder="transformer",
# device_map="cpu",
# )
# # torch.manual_seed(seed)
# # model = self.model_class(**init_dict)
# model.to(torch_device)
# model.eval()
# with attention_backend("native"):
# with torch.no_grad():
# output = model(**inputs_dict)
# video_output, audio_output = output.to_tuple()
# self.assertIsNotNone(video_output)
# self.assertIsNotNone(audio_output)
# # input & output have to have the same shape
# video_expected_shape = (batch_size, latent_frames * latent_height * latent_width, num_channels)
# self.assertEqual(video_output.shape, video_expected_shape, "Video input and output shapes do not match")
# audio_expected_shape = (batch_size, latent_length, audio_num_channels * num_mel_bins)
# self.assertEqual(audio_output.shape, audio_expected_shape, "Audio input and output shapes do not match")
# # Check against expected slice
# # fmt: off
# video_expected_slice = torch.tensor([0.4783, 1.6954, -1.2092, 0.1762, 0.7801, 1.2025, -1.4525, -0.2721, 0.3354, 1.9144, -1.5546, 0.0831, 0.4391, 1.7012, -1.7373, -0.2676])
# audio_expected_slice = torch.tensor([-0.4236, 0.4750, 0.3901, -0.4339, -0.2782, 0.4357, 0.4526, -0.3927, -0.0980, 0.4870, 0.3964, -0.3169, -0.3974, 0.4408, 0.3809, -0.4692])
# # fmt: on
# video_output_flat = video_output.cpu().flatten().float()
# video_generated_slice = torch.cat([video_output_flat[:8], video_output_flat[-8:]])
# self.assertTrue(torch.allclose(video_generated_slice, video_expected_slice, atol=1e-4))
# audio_output_flat = audio_output.cpu().flatten().float()
# audio_generated_slice = torch.cat([audio_output_flat[:8], audio_output_flat[-8:]])
# self.assertTrue(torch.allclose(audio_generated_slice, audio_expected_slice, atol=1e-4))
class TestLTX2TransformerAttention(LTX2TransformerTesterConfig, AttentionTesterMixin):
"""Attention processor tests for LTX2 Video Transformer."""
class LTX2TransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
model_class = LTX2VideoTransformer3DModel
@pytest.mark.skip(
"LTX2Attention does not set is_cross_attention, so fuse_projections tries to fuse Q+K+V together even for cross-attention modules with different input dimensions."
)
def test_fuse_unfuse_qkv_projections(self, atol=1e-3, rtol=0):
pass
class TestLTX2TransformerCompile(LTX2TransformerTesterConfig, TorchCompileTesterMixin):
"""Torch compile tests for LTX2 Video Transformer."""
# TODO: Add pretrained_model_name_or_path once a tiny LTX2 model is available on the Hub
# class TestLTX2TransformerBitsAndBytes(LTX2TransformerTesterConfig, BitsAndBytesTesterMixin):
# """BitsAndBytes quantization tests for LTX2 Video Transformer."""
# TODO: Add pretrained_model_name_or_path once a tiny LTX2 model is available on the Hub
# class TestLTX2TransformerTorchAo(LTX2TransformerTesterConfig, TorchAoTesterMixin):
# """TorchAo quantization tests for LTX2 Video Transformer."""
def prepare_init_args_and_inputs_for_common(self):
return LTX2TransformerTests().prepare_init_args_and_inputs_for_common()