Compare commits

...

6 Commits

Author | SHA1 | Message | Date
Dhruv Nair | 93bf3bda52 | Merge branch 'fast-gpu-test-fixes' of https://github.com/huggingface/diffusers into fast-gpu-test-fixes | 2024-09-03 06:58:33 +00:00
Dhruv Nair | 1e27fa56d2 | update | 2024-09-03 06:58:13 +00:00
Dhruv Nair | 062125d0e5 | Merge branch 'main' into fast-gpu-test-fixes | 2024-09-03 11:10:25 +05:30
Dhruv Nair | 36d4cd4075 | update | 2024-09-02 13:58:47 +00:00
Dhruv Nair | 08620d80e4 | update | 2024-09-02 13:41:49 +00:00
Dhruv Nair | d69121118c | update | 2024-09-02 13:34:07 +00:00
5 changed files with 9 additions and 1 deletion


@@ -157,11 +157,12 @@ class StableDiffusionLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
             if ("adapter-1" in n or "adapter-2" in n) and not isinstance(m, (nn.Dropout, nn.Identity)):
                 self.assertTrue(m.weight.device != torch.device("cpu"))
 
     @slow
     @require_torch_gpu
     def test_integration_move_lora_dora_cpu(self):
         from peft import LoraConfig
 
-        path = "runwayml/stable-diffusion-v1-5"
+        path = "Lykon/dreamshaper-8"
         unet_lora_config = LoraConfig(
             init_lora_weights="gaussian",
             target_modules=["to_k", "to_q", "to_v", "to_out.0"],
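The only change in this hunk is the base checkpoint: the test now loads "Lykon/dreamshaper-8" instead of "runwayml/stable-diffusion-v1-5", presumably because the latter is no longer hosted on the Hub. For orientation, here is a minimal sketch of what a move-LoRA/DoRA-to-CPU integration test like this typically does, assuming the standard diffusers PeftAdapterMixin.add_adapter API and peft's use_dora option; the full test body is not shown in this diff, so the flow below is illustrative, not the exact implementation:

import torch
from torch import nn
from peft import LoraConfig
from diffusers import StableDiffusionPipeline

# Illustrative reconstruction; variable names mirror the visible diff context.
path = "Lykon/dreamshaper-8"
unet_lora_config = LoraConfig(
    init_lora_weights="gaussian",
    target_modules=["to_k", "to_q", "to_v", "to_out.0"],
    use_dora=True,  # DoRA variant, as the test name suggests
)

pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
pipe.unet.add_adapter(unet_lora_config, adapter_name="adapter-1")

# Move the pipeline to the GPU and check that the adapter weights followed,
# mirroring the assertion visible at the top of the hunk.
pipe.to("cuda")
for n, m in pipe.unet.named_modules():
    if "adapter-1" in n and not isinstance(m, (nn.Dropout, nn.Identity)):
        assert m.weight.device != torch.device("cpu")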


@@ -528,6 +528,10 @@ class AutoencoderOobleckTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
     def test_forward_with_norm_groups(self):
         pass
 
+    @unittest.skip("No attention module used in this model")
+    def test_set_attn_processor_for_determinism(self):
+        return
 
 @slow
 class AutoencoderTinyIntegrationTests(unittest.TestCase):
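For context, the shared test_set_attn_processor_for_determinism swaps a model's attention processors and checks that the outputs do not change; AutoencoderOobleck contains no attention modules, so there is nothing to swap and the test is skipped outright. A rough sketch of the kind of check being skipped, assuming the generic AttnProcessor from diffusers; this illustrates the pattern and is not the mixin's actual code:

import torch
from diffusers.models.attention_processor import AttnProcessor

def check_attn_processor_determinism(model, inputs):
    # Hypothetical helper: run the model with its default processors, reset the
    # processors explicitly, run again, and require (near-)identical outputs.
    with torch.no_grad():
        out_before = model(**inputs).sample

    # set_attn_processor only exists on models that actually use attention,
    # which is exactly why a model without attention has to skip this test.
    model.set_attn_processor(AttnProcessor())
    with torch.no_grad():
        out_after = model(**inputs).sample

    assert torch.allclose(out_before, out_after, atol=1e-5)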


@@ -220,6 +220,7 @@ class ModelTesterMixin:
     base_precision = 1e-3
     forward_requires_fresh_args = False
     model_split_percents = [0.5, 0.7, 0.9]
+    uses_custom_attn_processor = False
 
     def check_device_map_is_respected(self, model, device_map):
         for param_name, param in model.named_parameters():
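uses_custom_attn_processor defaults to False on the shared mixin, so existing model test classes keep their current behavior; only subclasses that opt in (CogVideoX and LuminaNextDiT below) are affected. The diff does not show where the flag is read, but a class-level switch like this is typically consulted inside the mixin's attention-processor tests, roughly along these lines; this is a hypothetical illustration, not the actual mixin code:

class ModelTesterMixin:
    # Mixed into unittest.TestCase subclasses, which provide self.skipTest.
    uses_custom_attn_processor = False

    def test_set_attn_processor_for_determinism(self):
        if self.uses_custom_attn_processor:
            # Models that ship their own attention processor cannot be reset to
            # the generic AttnProcessor, so the determinism comparison does not apply.
            self.skipTest("Model uses a custom attention processor")
        # ... otherwise swap processors and compare outputs as usual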


@@ -32,6 +32,7 @@ enable_full_determinism()
 class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase):
     model_class = CogVideoXTransformer3DModel
     main_input_name = "hidden_states"
+    uses_custom_attn_processor = True
 
     @property
     def dummy_input(self):


@@ -32,6 +32,7 @@ enable_full_determinism()
 class LuminaNextDiT2DModelTransformerTests(ModelTesterMixin, unittest.TestCase):
     model_class = LuminaNextDiT2DModel
     main_input_name = "hidden_states"
+    uses_custom_attn_processor = True
 
     @property
     def dummy_input(self):