Mirror of https://github.com/huggingface/diffusers.git
Synced 2025-12-20 19:34:48 +08:00
Compare commits: 8 commits (diffusers-...cleanup-te)

| Author | SHA1 | Date |
|---|---|---|
| | 181b4d335f | |
| | 214f2bb076 | |
| | ffab17e024 | |
| | e1e8a3efde | |
| | 0bbf77da9a | |
| | 20944c2566 | |
| | 19fa1cbd84 | |
| | a30ef032a3 | |
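The common thread in these commits: heavy integration tests move from the `@slow` marker to `@nightly` (run only by the nightly CI job), copy-pasted class names are corrected, and tests pin the UNet's default attention processor for reproducibility. The `slow`/`nightly` markers come from `diffusers.utils.testing_utils`; below is a minimal sketch of how such environment-gated decorators typically work, where the `RUN_SLOW`/`RUN_NIGHTLY` variable names and the `parse_flag_from_env` helper are assumptions for illustration, not copied from the repo:

```python
import os
import unittest


def parse_flag_from_env(key: str, default: bool = False) -> bool:
    # Treat "yes"/"true"/"t"/"1" (any case) as enabled.
    value = os.environ.get(key, str(default))
    return value.lower() in ("yes", "true", "t", "1")


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)


def slow(test_case):
    # Skipped unless RUN_SLOW=1 is set in the environment.
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def nightly(test_case):
    # Skipped unless RUN_NIGHTLY=1 is set (e.g. by the nightly CI job).
    return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)
```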
```diff
@@ -22,7 +22,7 @@ import torch
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

 from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
-from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
+from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device, nightly
 from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

 from ..pipeline_params import (
```
```diff
@@ -186,15 +186,53 @@ class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterM
         return super().test_attention_slicing_forward_pass()


-@slow
+@nightly
 @require_torch_gpu
-class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
+class CycleDiffusionPipelineNightlyTests(unittest.TestCase):
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
         torch.cuda.empty_cache()

+    def test_cycle_diffusion_pipeline(self):
+        init_image = load_image(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
+            "/cycle-diffusion/black_colored_car.png"
+        )
+        expected_image = load_numpy(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
+        )
+        init_image = init_image.resize((512, 512))
+
+        model_id = "CompVis/stable-diffusion-v1-4"
+        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
+        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
+
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+        pipe.enable_attention_slicing()
+
+        source_prompt = "A black colored car"
+        prompt = "A blue colored car"
+
+        generator = torch.manual_seed(0)
+        output = pipe(
+            prompt=prompt,
+            source_prompt=source_prompt,
+            image=init_image,
+            num_inference_steps=100,
+            eta=0.1,
+            strength=0.85,
+            guidance_scale=3,
+            source_guidance_scale=1,
+            generator=generator,
+            output_type="np",
+        )
+        image = output.images
+
+        assert np.abs(image - expected_image).max() < 2e-2
+
     def test_cycle_diffusion_pipeline_fp16(self):
         init_image = load_image(
             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
```
```diff
@@ -235,41 +273,3 @@ class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):

         # the values aren't exactly equal, but the images look the same visually
         assert np.abs(image - expected_image).max() < 5e-1
-
-    def test_cycle_diffusion_pipeline(self):
-        init_image = load_image(
-            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
-            "/cycle-diffusion/black_colored_car.png"
-        )
-        expected_image = load_numpy(
-            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
-        )
-        init_image = init_image.resize((512, 512))
-
-        model_id = "CompVis/stable-diffusion-v1-4"
-        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
-        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
-
-        pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
-        pipe.enable_attention_slicing()
-
-        source_prompt = "A black colored car"
-        prompt = "A blue colored car"
-
-        generator = torch.manual_seed(0)
-        output = pipe(
-            prompt=prompt,
-            source_prompt=source_prompt,
-            image=init_image,
-            num_inference_steps=100,
-            eta=0.1,
-            strength=0.85,
-            guidance_scale=3,
-            source_guidance_scale=1,
-            generator=generator,
-            output_type="np",
-        )
-        image = output.images
-
-        assert np.abs(image - expected_image).max() < 2e-2
```
```diff
@@ -419,33 +419,6 @@ class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
         for module in pipe.text_encoder, pipe.unet, pipe.vae:
             assert module.device == torch.device("cpu")

-    def test_img2img_2nd_order(self):
-        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
-        sd_pipe.scheduler = HeunDiscreteScheduler.from_config(sd_pipe.scheduler.config)
-        sd_pipe.to(torch_device)
-        sd_pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_inputs(torch_device)
-        inputs["num_inference_steps"] = 10
-        inputs["strength"] = 0.75
-        image = sd_pipe(**inputs).images[0]
-
-        expected_image = load_numpy(
-            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/img2img_heun.npy"
-        )
-        max_diff = np.abs(expected_image - image).max()
-        assert max_diff < 5e-2
-
-        inputs = self.get_inputs(torch_device)
-        inputs["num_inference_steps"] = 11
-        inputs["strength"] = 0.75
-        image_other = sd_pipe(**inputs).images[0]
-
-        mean_diff = np.abs(image - image_other).mean()
-
-        # images should be very similar
-        assert mean_diff < 5e-2
-
     def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
         init_image = load_image(
             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
```
```diff
@@ -596,3 +569,30 @@ class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
         )
         max_diff = np.abs(expected_image - image).max()
         assert max_diff < 1e-3
+
+    def test_img2img_2nd_order(self):
+        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+        sd_pipe.scheduler = HeunDiscreteScheduler.from_config(sd_pipe.scheduler.config)
+        sd_pipe.to(torch_device)
+        sd_pipe.set_progress_bar_config(disable=None)
+
+        inputs = self.get_inputs(torch_device)
+        inputs["num_inference_steps"] = 10
+        inputs["strength"] = 0.75
+        image = sd_pipe(**inputs).images[0]
+
+        expected_image = load_numpy(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/img2img_heun.npy"
+        )
+        max_diff = np.abs(expected_image - image).max()
+        assert max_diff < 5e-2
+
+        inputs = self.get_inputs(torch_device)
+        inputs["num_inference_steps"] = 11
+        inputs["strength"] = 0.75
+        image_other = sd_pipe(**inputs).images[0]
+
+        mean_diff = np.abs(image - image_other).mean()
+
+        # images should be very similar
+        assert mean_diff < 5e-2
```
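`test_img2img_2nd_order` moves unchanged from the slow suite into the nightly suite. The Heun scheduler it exercises is a second-order solver that evaluates the UNet roughly twice per step, which makes the test noticeably more expensive than the first-order defaults; the test then checks that 10-step and 11-step runs land close together. A standalone sketch of the scheduler swap the test performs (float16 and a CUDA device are assumptions for brevity):

```python
import torch
from diffusers import HeunDiscreteScheduler, StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
# Swap in the second-order Heun solver, reusing the pipeline's existing
# scheduler config. Each Heun step runs the UNet twice, so the same
# num_inference_steps costs roughly 2x a first-order scheduler.
pipe.scheduler = HeunDiscreteScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
```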
```diff
@@ -20,16 +20,16 @@ import numpy as np
 import torch

 from diffusers import StableDiffusionKDiffusionPipeline
-from diffusers.utils import slow, torch_device
+from diffusers.utils import slow, nightly, torch_device
 from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


 enable_full_determinism()


-@slow
+@nightly
 @require_torch_gpu
-class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
+class StableDiffusionKDiffusionPipelineNightlyTests(unittest.TestCase):
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
```
```diff
@@ -29,7 +29,7 @@ from diffusers import (
     StableDiffusionPanoramaPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import slow, torch_device
+from diffusers.utils import slow, torch_device, nightly
 from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
```
```diff
@@ -245,9 +245,9 @@ class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, Pipeli
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


-@slow
+@nightly
 @require_torch_gpu
-class StableDiffusionPanoramaSlowTests(unittest.TestCase):
+class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()
         gc.collect()
```
```diff
@@ -268,6 +268,7 @@ class StableDiffusionPanoramaSlowTests(unittest.TestCase):
         model_ckpt = "stabilityai/stable-diffusion-2-base"
         scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
         pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
+        pipe.unet.set_default_attn_processor()
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing()
```
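This hunk, and many below, insert `pipe.unet.set_default_attn_processor()` immediately after loading the pipeline. As far as the change itself suggests, pinning the vanilla attention processor keeps the numerics independent of whichever optimized attention backend (sliced attention, xFormers, PyTorch scaled-dot-product attention) would otherwise be active, so the stored reference slices and images stay valid across environments. The pattern in isolation (model id borrowed from the hunk above):

```python
from diffusers import StableDiffusionPanoramaPipeline

pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-base", safety_checker=None
)
# Reset every attention block in the UNet to the default (vanilla) processor,
# so outputs are numerically comparable with reference values recorded the
# same way.
pipe.unet.set_default_attn_processor()
```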
```diff
@@ -298,6 +299,7 @@ class StableDiffusionPanoramaSlowTests(unittest.TestCase):
         pipe = StableDiffusionPanoramaPipeline.from_pretrained(
             "stabilityai/stable-diffusion-2-base", safety_checker=None
         )
+        pipe.unet.set_default_attn_processor()
         pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
         pipe.unet.set_default_attn_processor()
         pipe.to(torch_device)
```
```diff
@@ -379,6 +381,7 @@ class StableDiffusionPanoramaSlowTests(unittest.TestCase):
         model_ckpt = "stabilityai/stable-diffusion-2-base"
         scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
         pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
+        pipe.unet.set_default_attn_processor()
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing()
```
```diff
@@ -396,6 +399,7 @@ class StableDiffusionPanoramaSlowTests(unittest.TestCase):
         model_ckpt = "stabilityai/stable-diffusion-2-base"
         scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
         pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
+        pipe.unet.set_default_attn_processor()
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
```
```diff
@@ -33,7 +33,7 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.image_processor import VaeImageProcessor
-from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
+from diffusers.utils import floats_tensor, load_numpy, slow, nightly, torch_device
 from diffusers.utils.testing_utils import enable_full_determinism, load_image, load_pt, require_torch_gpu, skip_mps

 from ..pipeline_params import (
```
```diff
@@ -499,6 +499,7 @@ class InversionPipelineSlowTests(unittest.TestCase):
         pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
             "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
         )
+        pipe.unet.set_default_attn_processor()
         pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)

         caption = "a photography of a cat with flowers"
```
```diff
@@ -521,6 +522,7 @@ class InversionPipelineSlowTests(unittest.TestCase):
         pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
             "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
         )
+        pipe.unet.set_default_attn_processor()
         pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)

         caption = "a photography of a cat with flowers"
```
```diff
@@ -535,10 +537,19 @@ class InversionPipelineSlowTests(unittest.TestCase):
         image_slice = inv_latents[0, -3:, -3:, -1].flatten()

         assert inv_latents.shape == (1, 4, 64, 64)
-        expected_slice = np.array([0.8970, -0.1611, 0.4766, -1.1162, -0.5923, 0.1050, -0.9678, 1.0537, -0.6050])
+        expected_slice = np.array([0.8452, -0.1538, 0.4570, -1.0918, -0.6138, 0.1060, -1.0088, 1.0371, -0.5713])

         assert np.abs(expected_slice - image_slice.cpu().numpy()).max() < 5e-2

+
+@nightly
+@require_torch_gpu
+class InversionPipelineNightlyTests(unittest.TestCase):
+    def tearDown(self):
+        super().tearDown()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def test_stable_diffusion_pix2pix_full(self):
         # numpy array of https://huggingface.co/datasets/hf-internal-testing/diffusers-images/blob/main/pix2pix/dog.png
         expected_image = load_numpy(
```
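The nine `expected_slice` floats are a regression fixture: a 3x3 corner slice of the inverted latents, flattened to nine values. When a change such as the attention-processor pinning above shifts the numerics slightly, the fixture is re-recorded from a trusted run while the 5e-2 tolerance stays the same. A purely illustrative helper for producing such a fixture string (`format_slice` is not part of the repo):

```python
import torch


def format_slice(latents: torch.Tensor) -> str:
    """Format a 3x3 corner slice of a (1, 4, 64, 64) latent as a fixture string."""
    corner = latents[0, -3:, -3:, -1].flatten()  # nine values, as in the test
    return ", ".join(f"{v:.4f}" for v in corner.cpu().numpy())


# Example with a dummy tensor of the asserted shape:
print(format_slice(torch.randn(1, 4, 64, 64)))
```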
```diff
@@ -26,7 +26,7 @@ from diffusers import (
     StableDiffusionSAGPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import slow, torch_device
+from diffusers.utils import slow, nightly, torch_device
 from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
```
```diff
@@ -116,9 +116,9 @@ class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTes
         super().test_inference_batch_single_identical(expected_max_diff=3e-3)


-@slow
+@nightly
 @require_torch_gpu
-class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
+class StableDiffusionSAGPipelineNightlyTests(unittest.TestCase):
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
```
```diff
@@ -31,7 +31,7 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.models.attention_processor import AttnProcessor
-from diffusers.utils import load_numpy, slow, torch_device
+from diffusers.utils import load_numpy, slow, nightly, torch_device
 from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


```
```diff
@@ -242,9 +242,9 @@ class StableDiffusion2VPredictionPipelineFastTests(unittest.TestCase):
         assert image.shape == (1, 64, 64, 3)


-@slow
+@nightly
 @require_torch_gpu
-class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
+class StableDiffusion2VPredictionPipelineNightlyTests(unittest.TestCase):
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
```
```diff
@@ -253,6 +253,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):

     def test_stable_diffusion_v_pred_default(self):
         sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2")
+        sd_pipe.unet.set_default_attn_processor()
         sd_pipe = sd_pipe.to(torch_device)
         sd_pipe.enable_attention_slicing()
         sd_pipe.set_progress_bar_config(disable=None)
```
```diff
@@ -273,6 +274,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
         sd_pipe = StableDiffusionPipeline.from_pretrained(
             "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
         )
+        sd_pipe.unet.set_default_attn_processor()
         sd_pipe = sd_pipe.to(torch_device)
         sd_pipe.enable_attention_slicing()
         sd_pipe.set_progress_bar_config(disable=None)
```
```diff
@@ -292,6 +294,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
     def test_stable_diffusion_v_pred_euler(self):
         scheduler = EulerDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-2", subfolder="scheduler")
         sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", scheduler=scheduler)
+        sd_pipe.unet.set_default_attn_processor()
         sd_pipe = sd_pipe.to(torch_device)
         sd_pipe.enable_attention_slicing()
         sd_pipe.set_progress_bar_config(disable=None)
```
```diff
@@ -317,6 +320,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
             "stabilityai/stable-diffusion-2", subfolder="scheduler"
         )
         sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", scheduler=scheduler)
+        sd_pipe.unet.set_default_attn_processor()
         sd_pipe = sd_pipe.to(torch_device)
         sd_pipe.enable_attention_slicing()
         sd_pipe.set_progress_bar_config(disable=None)
```
```diff
@@ -337,6 +341,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
         torch.cuda.reset_peak_memory_stats()
         model_id = "stabilityai/stable-diffusion-2"
         pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+        pipe.unet.set_default_attn_processor()
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

```
```diff
@@ -373,6 +378,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
         )

         pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2")
+        pipe.unet.set_default_attn_processor()
         pipe.to(torch_device)
         pipe.enable_attention_slicing()
         pipe.set_progress_bar_config(disable=None)
```
```diff
@@ -416,6 +422,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
         )

         pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16)
+        pipe.unet.set_default_attn_processor()
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

```
```diff
@@ -432,8 +439,9 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
         filename = hf_hub_download("stabilityai/stable-diffusion-2-1", filename="v2-1_768-ema-pruned.safetensors")

         pipe = StableDiffusionPipeline.from_single_file(filename, torch_dtype=torch.float16)
+        pipe.unet.set_default_attn_processor()
         pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
-        pipe.to("cuda")
+        pipe.enable_model_cpu_offload()

         image_out = pipe("test", num_inference_steps=1, output_type="np").images[0]

```
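This hunk and the next also swap a hard `pipe.to("cuda")` for `enable_model_cpu_offload()`, which keeps whole sub-models (text encoder, UNet, VAE) on the CPU and moves each one to the GPU only while it is needed, cutting peak VRAM at some speed cost. A minimal sketch of the same call outside the test (requires the `accelerate` package; the model id is borrowed from the surrounding tests):

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
)
# Instead of pipe.to("cuda"): sub-models are moved to the GPU on demand
# and back to the CPU afterwards, trading throughput for lower peak VRAM.
pipe.enable_model_cpu_offload()

image = pipe("a turtle", num_inference_steps=5).images[0]
```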
```diff
@@ -447,7 +455,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
         pipe_single = StableDiffusionPipeline.from_single_file(single_file_path)
         pipe_single.scheduler = DDIMScheduler.from_config(pipe_single.scheduler.config)
         pipe_single.unet.set_attn_processor(AttnProcessor())
-        pipe_single.to("cuda")
+        pipe.enable_model_cpu_offload()

         generator = torch.Generator(device="cpu").manual_seed(0)
         image_ckpt = pipe_single("a turtle", num_inference_steps=5, generator=generator, output_type="np").images[0]
```