Mirror of https://github.com/huggingface/diffusers.git (synced 2025-12-19 19:04:49 +08:00)

Compare commits: cache-docs...pr-test-sp (14 commits)
| Author | SHA1 | Date |
|---|---|---|
| | eb6d907b14 | |
| | e86248029d | |
| | 0cee3621b6 | |
| | 1673ece773 | |
| | 208b955b9c | |
| | 1d9dfd2c19 | |
| | 1f217a5440 | |
| | 7f0942abb4 | |
| | d87907da30 | |
| | 62e2cce917 | |
| | 29d8763e68 | |
| | 5337132c69 | |
| | f22c75a3a6 | |
| | bebfd619b0 | |
.github/workflows/pr_tests.yml (vendored, 8 changes)
@@ -79,7 +79,7 @@ jobs:
         config:
           - name: Fast PyTorch Pipeline CPU tests
             framework: pytorch_pipelines
-            runner: aws-highmemory-32-plus
+            runner: aws-highmemory-64-plus
             image: diffusers/diffusers-pytorch-cpu
             report: torch_cpu_pipelines
           - name: Fast PyTorch Models & Schedulers CPU tests
@@ -125,8 +125,8 @@ jobs:
       - name: Run fast PyTorch Pipeline CPU tests
         if: ${{ matrix.config.framework == 'pytorch_pipelines' }}
         run: |
-          pytest -n 8 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx" \
+          pytest -n 24 --max-worker-restart=0 --dist=loadfile \
+            -k "not Flax and not Onnx" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests/pipelines

@@ -134,7 +134,7 @@ jobs:
         if: ${{ matrix.config.framework == 'pytorch_models' }}
         run: |
           pytest -n 4 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx and not Dependency" \
+            -s -k "not Flax and not Onnx and not Dependency" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests/models tests/schedulers tests/others

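The pr_tests.yml changes above move the pipeline-test job to the aws-highmemory-64-plus runner, raise pytest-xdist parallelism from 8 to 24 workers, and drop the `-s -v` flags for the pipeline suite. A rough local approximation of the new invocation is sketched below; it assumes pytest and pytest-xdist are installed, scales the worker count down for a laptop, and omits the repo-specific `--make-reports` option provided by the diffusers test conftest.

```python
# Illustrative local approximation of the updated CI pytest invocation (not the CI script itself).
import sys

import pytest

exit_code = pytest.main(
    [
        "-n", "4",                      # CI now uses -n 24 on the larger runner
        "--max-worker-restart=0",       # fail fast instead of respawning crashed workers
        "--dist=loadfile",              # keep all tests from one file on the same worker
        "-k", "not Flax and not Onnx",  # CI also dropped -s -v for the pipeline suite
        "tests/pipelines",
    ]
)
sys.exit(exit_code)
```

With `--dist=loadfile`, all tests in a single file stay on one worker, so the higher `-n` in CI mainly pays off because tests/pipelines contains many separate test files that can run concurrently.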
.github/workflows/pr_tests_gpu.yml (vendored, 26 changes)
@@ -1,19 +1,19 @@
 name: Fast GPU Tests on PR
 
 on:
-  pull_request:
-    branches: main
-    paths:
-      - "src/diffusers/models/modeling_utils.py"
-      - "src/diffusers/models/model_loading_utils.py"
-      - "src/diffusers/pipelines/pipeline_utils.py"
-      - "src/diffusers/pipeline_loading_utils.py"
-      - "src/diffusers/loaders/lora_base.py"
-      - "src/diffusers/loaders/lora_pipeline.py"
-      - "src/diffusers/loaders/peft.py"
-      - "tests/pipelines/test_pipelines_common.py"
-      - "tests/models/test_modeling_common.py"
-      - "examples/**/*.py"
+  # pull_request:
+  #   branches: main
+  #   paths:
+  #     - "src/diffusers/models/modeling_utils.py"
+  #     - "src/diffusers/models/model_loading_utils.py"
+  #     - "src/diffusers/pipelines/pipeline_utils.py"
+  #     - "src/diffusers/pipeline_loading_utils.py"
+  #     - "src/diffusers/loaders/lora_base.py"
+  #     - "src/diffusers/loaders/lora_pipeline.py"
+  #     - "src/diffusers/loaders/peft.py"
+  #     - "tests/pipelines/test_pipelines_common.py"
+  #     - "tests/models/test_modeling_common.py"
+  #     - "examples/**/*.py"
   workflow_dispatch:
 
 concurrency:
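With the pull_request trigger commented out, pr_tests_gpu.yml now only runs when started manually through workflow_dispatch. Below is a hedged sketch of dispatching it via the GitHub REST API; OWNER/REPO, the branch ref, and the GITHUB_TOKEN environment variable are placeholders, not values taken from this diff.

```python
# Illustrative manual dispatch of the GPU workflow via the GitHub REST API.
import os

import requests

resp = requests.post(
    "https://api.github.com/repos/OWNER/REPO/actions/workflows/pr_tests_gpu.yml/dispatches",
    headers={
        "Accept": "application/vnd.github+json",
        "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",  # token with workflow scope
    },
    json={"ref": "main"},  # branch or tag to run the workflow on
    timeout=30,
)
resp.raise_for_status()  # GitHub answers 204 No Content on success
```

The same dispatch can be done with the GitHub CLI, e.g. `gh workflow run pr_tests_gpu.yml`.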
@@ -18,10 +18,8 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 import diffusers
 from diffusers import (
-    AsymmetricAutoencoderKL,
     AutoencoderKL,
     AutoencoderTiny,
-    ConsistencyDecoderVAE,
     DDIMScheduler,
     DiffusionPipeline,
     FasterCacheConfig,
@@ -50,12 +48,6 @@ from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.source_code_parsing_utils import ReturnNameVisitor
 
-from ..models.autoencoders.vae import (
-    get_asym_autoencoder_kl_config,
-    get_autoencoder_kl_config,
-    get_autoencoder_tiny_config,
-    get_consistency_vae_config,
-)
 from ..models.transformers.test_models_transformer_flux import create_flux_ip_adapter_state_dict
 from ..models.unets.test_models_unet_2d_condition import (
     create_ip_adapter_faceid_state_dict,
@@ -72,7 +64,6 @@ from ..testing_utils import (
     require_torch,
     require_torch_accelerator,
     require_transformers_version_greater,
-    skip_mps,
     torch_device,
 )
 
@@ -176,46 +167,6 @@ class SDFunctionTesterMixin:
         zeros = torch.zeros(shape).to(torch_device)
         pipe.vae.decode(zeros)
 
-    # MPS currently doesn't support ComplexFloats, which are required for FreeU - see https://github.com/huggingface/diffusers/issues/7569.
-    @skip_mps
-    def test_freeu(self):
-        components = self.get_dummy_components()
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
-
-        # Normal inference
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["return_dict"] = False
-        inputs["output_type"] = "np"
-        output = pipe(**inputs)[0]
-
-        # FreeU-enabled inference
-        pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["return_dict"] = False
-        inputs["output_type"] = "np"
-        output_freeu = pipe(**inputs)[0]
-
-        # FreeU-disabled inference
-        pipe.disable_freeu()
-        freeu_keys = {"s1", "s2", "b1", "b2"}
-        for upsample_block in pipe.unet.up_blocks:
-            for key in freeu_keys:
-                assert getattr(upsample_block, key) is None, f"Disabling of FreeU should have set {key} to None."
-
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["return_dict"] = False
-        inputs["output_type"] = "np"
-        output_no_freeu = pipe(**inputs)[0]
-
-        assert not np.allclose(output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]), (
-            "Enabling of FreeU should lead to different results."
-        )
-        assert np.allclose(output, output_no_freeu, atol=1e-2), (
-            f"Disabling of FreeU should lead to results similar to the default pipeline results but Max Abs Error={np.abs(output_no_freeu - output).max()}."
-        )
-
     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         components = self.get_dummy_components()
@@ -775,34 +726,6 @@ class PipelineLatentTesterMixin:
         max_diff = np.abs(out - out_latents_inputs).max()
         self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
 
-    def test_multi_vae(self):
-        components = self.get_dummy_components()
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
-
-        block_out_channels = pipe.vae.config.block_out_channels
-        norm_num_groups = pipe.vae.config.norm_num_groups
-
-        vae_classes = [AutoencoderKL, AsymmetricAutoencoderKL, ConsistencyDecoderVAE, AutoencoderTiny]
-        configs = [
-            get_autoencoder_kl_config(block_out_channels, norm_num_groups),
-            get_asym_autoencoder_kl_config(block_out_channels, norm_num_groups),
-            get_consistency_vae_config(block_out_channels, norm_num_groups),
-            get_autoencoder_tiny_config(block_out_channels),
-        ]
-
-        out_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0]
-
-        for vae_cls, config in zip(vae_classes, configs):
-            vae = vae_cls(**config)
-            vae = vae.to(torch_device)
-            components["vae"] = vae
-            vae_pipe = self.pipeline_class(**components)
-            out_vae_np = vae_pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0]
-
-            assert out_vae_np.shape == out_np.shape
-
 
 @require_torch
 class PipelineFromPipeTesterMixin:
@@ -1153,6 +1076,15 @@ class PipelineTesterMixin:
         gc.collect()
         backend_empty_cache(torch_device)
 
+    def get_base_pipeline_output(self, pipe):
+        if not hasattr(self, "_base_pipeline_output"):
+            inputs = self.get_dummy_inputs(torch_device)
+            inputs["generator"] = self.get_generator(0)
+            output = pipe(**inputs)[0]
+            self._base_pipeline_output = output
+
+        return self._base_pipeline_output
+
     def test_save_load_local(self, expected_max_difference=5e-4):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
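The new get_base_pipeline_output helper memoizes the baseline pipeline output on the test instance, so the tests in the hunks below that previously recomputed `pipe(**inputs)[0]` can share a single reference run. Here is a minimal, self-contained sketch of that caching pattern; ToyPipe and the fixed CPU generator are illustrative stand-ins, not the diffusers fixtures.

```python
# Minimal sketch of the memoization pattern behind get_base_pipeline_output.
import torch


class ToyPipe:
    def __call__(self, generator):
        # Stand-in for a full diffusion pipeline forward pass.
        return torch.randn(1, 8, 8, 3, generator=generator)


class PipelineTests:
    def get_generator(self, seed):
        return torch.Generator("cpu").manual_seed(seed)

    def get_base_pipeline_output(self, pipe):
        # Run the pipeline once per test instance, then reuse the cached result.
        if not hasattr(self, "_base_pipeline_output"):
            self._base_pipeline_output = pipe(generator=self.get_generator(0))
        return self._base_pipeline_output

    def test_save_load_local(self, pipe):
        output = self.get_base_pipeline_output(pipe)           # cached baseline
        output_loaded = pipe(generator=self.get_generator(0))  # fresh run to compare against
        assert torch.allclose(output, output_loaded, atol=5e-4)


tests = PipelineTests()
tests.test_save_load_local(ToyPipe())
```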
@@ -1164,7 +1096,7 @@ class PipelineTesterMixin:
         pipe.set_progress_bar_config(disable=None)
 
         inputs = self.get_dummy_inputs(torch_device)
-        output = pipe(**inputs)[0]
+        output = self.get_base_pipeline_output(pipe)
 
         logger = logging.get_logger("diffusers.pipelines.pipeline_utils")
         logger.setLevel(diffusers.logging.INFO)
@@ -1283,7 +1215,7 @@ class PipelineTesterMixin:
         output = pipe(**batched_input)
         assert len(output[0]) == batch_size
 
-    def test_inference_batch_single_identical(self, batch_size=3, expected_max_diff=1e-4):
+    def test_inference_batch_single_identical(self, batch_size=2, expected_max_diff=1e-4):
         self._test_inference_batch_single_identical(batch_size=batch_size, expected_max_diff=expected_max_diff)
 
     def _test_inference_batch_single_identical(
@@ -1402,7 +1334,7 @@ class PipelineTesterMixin:
         # Reset generator in case it is used inside dummy inputs
         if "generator" in inputs:
             inputs["generator"] = self.get_generator(0)
-        output = pipe(**inputs)[0]
+        output = self.get_base_pipeline_output(pipe)
 
         fp16_inputs = self.get_dummy_inputs(torch_device)
         # Reset generator in case it is used inside dummy inputs
@@ -1433,7 +1365,7 @@ class PipelineTesterMixin:
         pipe.set_progress_bar_config(disable=None)
 
         inputs = self.get_dummy_inputs(torch_device)
-        output = pipe(**inputs)[0]
+        output = self.get_base_pipeline_output(pipe)
 
         with tempfile.TemporaryDirectory() as tmpdir:
             pipe.save_pretrained(tmpdir)
@@ -1476,7 +1408,7 @@ class PipelineTesterMixin:
         generator_device = "cpu"
         inputs = self.get_dummy_inputs(generator_device)
         torch.manual_seed(0)
-        output = pipe(**inputs)[0]
+        output = self.get_base_pipeline_output(pipe)
 
         with tempfile.TemporaryDirectory() as tmpdir:
             pipe.save_pretrained(tmpdir, safe_serialization=False)