mirror of https://github.com/huggingface/diffusers.git

Compare commits: qwenimage-...test-clean

3 commits: bce948028d, 3efb737371, fcb4ee5c11
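All three commits apply the same mechanical change across the diffusers test suite: each affected slow/nightly/integration test class gains a `setUp` that mirrors its existing `tearDown`, forcing garbage collection and emptying the CUDA cache before each test as well as after it, so a memory-hungry or failing test cannot leak cached VRAM into the next one on a shared GPU runner. A minimal sketch of the resulting pattern, using a hypothetical class name (the real classes also carry markers such as `@slow`, `@nightly`, and `@require_torch_gpu`):

```python
import gc
import unittest

import torch


class ExamplePipelineSlowTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
```

The hunks below show this `setUp` being inserted, class by class, directly in front of each existing `tearDown`.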
@@ -81,6 +81,11 @@ class StableDiffusionLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
         "latent_channels": 4,
     }
 
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -150,6 +155,11 @@ class StableDiffusionLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
 @require_torch_gpu
 @require_peft_backend
 class LoraIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -90,6 +90,11 @@ class StableDiffusionXLLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
         "sample_size": 128,
     }
 
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -100,6 +105,11 @@ class StableDiffusionXLLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
 @require_torch_gpu
 @require_peft_backend
 class LoraSDXLIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -1017,6 +1017,12 @@ class AsymmetricAutoencoderKLIntegrationTests(unittest.TestCase):
 
 @slow
 class ConsistencyDecoderVAEIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -303,6 +303,12 @@ class AnimateDiffPipelineFastTests(
 @slow
 @require_torch_gpu
 class AnimateDiffPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -371,6 +371,11 @@ class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 
 @nightly
 class AudioLDMPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -411,6 +416,11 @@ class AudioLDMPipelineSlowTests(unittest.TestCase):
 
 @nightly
 class AudioLDMPipelineNightlyTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()
@@ -493,6 +493,11 @@ class AudioLDM2PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 
 @nightly
 class AudioLDM2PipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -170,6 +170,11 @@ class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 @nightly
 @require_torch_gpu
 class ConsistencyModelPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -684,6 +684,11 @@ class StableDiffusionMultiControlNetOneModelPipelineFastTests(
 @slow
 @require_torch_gpu
 class ControlNetPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -1135,6 +1140,11 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
 @slow
 @require_torch_gpu
 class StableDiffusionMultiControlNetPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -382,6 +382,11 @@ class StableDiffusionMultiControlNetPipelineFastTests(
 @slow
 @require_torch_gpu
 class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -445,6 +445,11 @@ class MultiControlNetInpaintPipelineFastTests(
 @slow
 @require_torch_gpu
 class ControlNetInpaintPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -884,6 +884,11 @@ class StableDiffusionXLMultiControlNetOneModelPipelineFastTests(
 @slow
 @require_torch_gpu
 class ControlNetSDXLPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -118,6 +118,12 @@ class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 @nightly
 @require_torch_gpu
 class PipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
@@ -109,6 +109,11 @@ class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 @nightly
 @require_torch_gpu
 class DiTPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -229,6 +229,12 @@ class I2VGenXLPipelineFastTests(SDFunctionTesterMixin, PipelineTesterMixin, unit
 @slow
 @require_torch_gpu
 class I2VGenXLPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -365,6 +365,12 @@ class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class KandinskyImg2ImgPipelineNightlyTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -138,6 +138,11 @@ class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 @nightly
 @require_torch_gpu
 class LDMTextToImagePipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -174,6 +179,11 @@ class LDMTextToImagePipelineSlowTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -204,6 +204,11 @@ class LEditsPPPipelineStableDiffusionFastTests(unittest.TestCase):
 @slow
 @require_torch_gpu
 class LEditsPPPipelineStableDiffusionSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -408,6 +408,11 @@ class MusicLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 @nightly
 @require_torch_gpu
 class MusicLDMPipelineNightlyTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -174,6 +174,12 @@ class PaintByExamplePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 @nightly
 @require_torch_gpu
 class PaintByExamplePipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
@@ -332,6 +332,11 @@ class PixArtAlphaPipelineIntegrationTests(unittest.TestCase):
     ckpt_id_512 = "PixArt-alpha/PixArt-XL-2-512x512"
     prompt = "A small cactus with a happy face in the Sahara desert."
 
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -37,6 +37,12 @@ enable_full_determinism()
 
 
 class SafeDiffusionPipelineFastTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -267,6 +273,12 @@ class SafeDiffusionPipelineFastTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -224,6 +224,12 @@ class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 @nightly
 @require_torch_gpu
 class ShapEPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -250,6 +250,12 @@ class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 @nightly
 @require_torch_gpu
 class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -311,6 +311,12 @@ class StableCascadeDecoderPipelineFastTests(PipelineTesterMixin, unittest.TestCa
 @slow
 @require_torch_gpu
 class StableCascadeDecoderPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -313,6 +313,12 @@ class StableCascadePriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase
 @slow
 @require_torch_gpu
 class StableCascadePriorPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -1238,6 +1238,11 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase):
 @slow
 @require_torch_gpu
 class StableDiffusionPipelineCkptTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()
@@ -1332,6 +1337,11 @@ class StableDiffusionPipelineCkptTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class StableDiffusionPipelineNightlyTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -389,6 +389,11 @@ class StableDiffusionImg2ImgPipelineFastTests(
 @slow
 @require_torch_gpu
 class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -648,6 +653,11 @@ class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -1074,6 +1074,11 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.Te
 @nightly
 @require_torch_gpu
 class StableDiffusionInpaintPipelineNightlyTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -271,6 +271,11 @@ class StableDiffusionInstructPix2PixPipelineFastTests(
 @slow
 @require_torch_gpu
 class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -202,6 +202,11 @@ class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
         super().tearDownClass()
         torch.use_deterministic_algorithms(True)
 
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -378,6 +378,11 @@ class StableDiffusionDepth2ImgPipelineFastTests(
 @slow
 @require_torch_gpu
 class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -517,6 +522,11 @@ class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()
@@ -293,6 +293,11 @@ class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, Pipeli
 @require_torch_gpu
 @nightly
 class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -365,6 +370,11 @@ class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class StableDiffusionDiffEditPipelineNightlyTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -156,6 +156,12 @@ class StableDiffusion2InpaintPipelineFastTests(
 @slow
 @require_torch_gpu
 class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -243,6 +243,11 @@ class StableDiffusionLatentUpscalePipelineFastTests(
 @require_torch_gpu
 @slow
 class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -40,6 +40,12 @@ enable_full_determinism()
 
 
 class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -377,6 +383,12 @@ class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
 @slow
 @require_torch_gpu
 class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -45,6 +45,12 @@ enable_full_determinism()
 
 
 class StableDiffusion2VPredictionPipelineFastTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -254,6 +260,12 @@ class StableDiffusion2VPredictionPipelineFastTests(unittest.TestCase):
 @slow
 @require_torch_gpu
 class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
@@ -593,6 +593,11 @@ class StableDiffusionMultiAdapterPipelineFastTests(AdapterTests, PipelineTesterM
 @slow
 @require_torch_gpu
 class StableDiffusionAdapterPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -164,6 +164,11 @@ class StableDiffusionImageVariationPipelineFastTests(
 @slow
 @require_torch_gpu
 class StableDiffusionImageVariationPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -274,6 +279,11 @@ class StableDiffusionImageVariationPipelineSlowTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class StableDiffusionImageVariationPipelineNightlyTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -29,6 +29,12 @@ enable_full_determinism()
 @nightly
 @require_torch_gpu
 class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -207,6 +207,11 @@ class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -253,6 +258,11 @@ class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class StableDiffusionPipelineNightlyTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -253,6 +253,11 @@ class StableDiffusionPanoramaPipelineFastTests(
 @nightly
 @require_torch_gpu
 class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -28,6 +28,12 @@ from diffusers.utils.testing_utils import floats_tensor, nightly, require_torch_
 
 
 class SafeDiffusionPipelineFastTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
@@ -258,6 +264,12 @@ class SafeDiffusionPipelineFastTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -146,6 +146,12 @@ class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTes
 @nightly
 @require_torch_gpu
 class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -1005,6 +1005,11 @@ class StableDiffusionXLPipelineFastTests(
 
 @slow
 class StableDiffusionXLPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -668,6 +668,11 @@ class StableDiffusionXLMultiAdapterPipelineFastTests(
 @slow
 @require_torch_gpu
 class AdapterSDXLPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -31,6 +31,12 @@ enable_full_determinism()
 class StableDiffusionXLKPipelineIntegrationTests(unittest.TestCase):
     dtype = torch.float16
 
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -188,6 +188,12 @@ class StableUnCLIPPipelineFastTests(
 @nightly
 @require_torch_gpu
 class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -209,6 +209,12 @@ class StableUnCLIPImg2ImgPipelineFastTests(
 @nightly
 @require_torch_gpu
 class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -516,6 +516,12 @@ class StableVideoDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCa
 @slow
 @require_torch_gpu
 class StableVideoDiffusionPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
@@ -1056,6 +1056,12 @@ class CustomPipelineTests(unittest.TestCase):
 
 
 class PipelineFastTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -1673,6 +1679,12 @@ class PipelineFastTests(unittest.TestCase):
 @slow
 @require_torch_gpu
 class PipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -1898,6 +1910,12 @@ class PipelineSlowTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class PipelineNightlyTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -421,6 +421,12 @@ class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 
 @nightly
 class UnCLIPPipelineCPUIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -453,6 +459,12 @@ class UnCLIPPipelineCPUIntegrationTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class UnCLIPPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -496,6 +496,12 @@ class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCa
 @nightly
 @require_torch_gpu
 class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
+    def setUp(self):
+        # clean up the VRAM before each test
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()

@@ -574,6 +574,11 @@ class UniDiffuserPipelineFastTests(
 @nightly
 @require_torch_gpu
 class UniDiffuserPipelineSlowTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()

@@ -690,6 +695,11 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase):
 @nightly
 @require_torch_gpu
 class UniDiffuserPipelineNightlyTests(unittest.TestCase):
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def tearDown(self):
         super().tearDown()
         gc.collect()
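For context, the `@slow` and `@nightly` markers that gate the classes above come from `diffusers.utils.testing_utils` (see the import in one of the hunks) and are opt-in. The sketch below shows how such gates can be expressed with stock `unittest`; it is illustrative rather than the library's actual implementation, and the `RUN_SLOW` / `RUN_NIGHTLY` environment-variable names are an assumption here:

```python
import os
import unittest


def _env_flag(name: str) -> bool:
    # treat "1", "true", or "yes" (any casing) as enabled
    return os.getenv(name, "").strip().lower() in {"1", "true", "yes"}


# Opt-in gates in the spirit of diffusers' @slow / @nightly markers.
slow = unittest.skipUnless(_env_flag("RUN_SLOW"), "slow test: set RUN_SLOW=1 to run")
nightly = unittest.skipUnless(_env_flag("RUN_NIGHTLY"), "nightly test: set RUN_NIGHTLY=1 to run")


@slow
class ExampleSlowTests(unittest.TestCase):
    def test_smoke(self):
        # placeholder body; real tests load a pipeline and compare outputs
        self.assertTrue(True)
```

Because the gated classes only run on dedicated GPU runners, the per-test `setUp`/`tearDown` cleanup added by these commits is what keeps successive slow tests from inheriting each other's cached VRAM.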