Compare commits

...

6 Commits

Author       SHA1         Message                                   Date
DN6          a957ea1f36   update                                    2025-09-30 12:36:17 +05:30
DN6          9938985426   update                                    2025-09-30 12:28:19 +05:30
DN6          6a47dd0c04   update                                    2025-09-30 11:23:05 +05:30
Dhruv Nair   05a08dff10   Merge branch 'main' into sf-test-mixin    2025-09-24 06:02:52 +02:00
DN6          1f6defd7d6   update                                    2025-09-19 13:27:08 +05:30
DN6          710e18b951   update                                    2025-09-11 12:53:58 +05:30
23 changed files with 173 additions and 390 deletions

View File

@@ -1,3 +1,4 @@
import gc
import tempfile
from io import BytesIO
@@ -9,7 +10,10 @@ from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_nam
from diffusers.models.attention_processor import AttnProcessor
from ..testing_utils import (
    backend_empty_cache,
    nightly,
    numpy_cosine_similarity_distance,
    require_torch_accelerator,
    torch_device,
)
@@ -47,6 +51,93 @@ def download_diffusers_config(repo_id, tmpdir):
    return path


@nightly
@require_torch_accelerator
class SingleFileModelTesterMixin:
    def setup_method(self):
        gc.collect()
        backend_empty_cache(torch_device)

    def teardown_method(self):
        gc.collect()
        backend_empty_cache(torch_device)

    def test_single_file_model_config(self):
        pretrained_kwargs = {}
        single_file_kwargs = {}

        if hasattr(self, "subfolder") and self.subfolder:
            pretrained_kwargs["subfolder"] = self.subfolder

        if hasattr(self, "torch_dtype") and self.torch_dtype:
            pretrained_kwargs["torch_dtype"] = self.torch_dtype
            single_file_kwargs["torch_dtype"] = self.torch_dtype

        model = self.model_class.from_pretrained(self.repo_id, **pretrained_kwargs)
        model_single_file = self.model_class.from_single_file(self.ckpt_path, **single_file_kwargs)

        PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
        for param_name, param_value in model_single_file.config.items():
            if param_name in PARAMS_TO_IGNORE:
                continue
            assert model.config[param_name] == param_value, (
                f"{param_name} differs between pretrained loading and single file loading"
            )

    def test_single_file_model_parameters(self):
        pretrained_kwargs = {}
        single_file_kwargs = {}

        if hasattr(self, "subfolder") and self.subfolder:
            pretrained_kwargs["subfolder"] = self.subfolder

        if hasattr(self, "torch_dtype") and self.torch_dtype:
            pretrained_kwargs["torch_dtype"] = self.torch_dtype
            single_file_kwargs["torch_dtype"] = self.torch_dtype

        model = self.model_class.from_pretrained(self.repo_id, **pretrained_kwargs)
        model_single_file = self.model_class.from_single_file(self.ckpt_path, **single_file_kwargs)

        state_dict = model.state_dict()
        state_dict_single_file = model_single_file.state_dict()

        assert set(state_dict.keys()) == set(state_dict_single_file.keys()), (
            "Model parameters keys differ between pretrained and single file loading"
        )

        for key in state_dict.keys():
            param = state_dict[key]
            param_single_file = state_dict_single_file[key]
            assert param.shape == param_single_file.shape, (
                f"Parameter shape mismatch for {key}: "
                f"pretrained {param.shape} vs single file {param_single_file.shape}"
            )
            assert torch.allclose(param, param_single_file, rtol=1e-5, atol=1e-5), (
                f"Parameter values differ for {key}: "
                f"max difference {torch.max(torch.abs(param - param_single_file)).item()}"
            )

    def test_checkpoint_altered_keys_loading(self):
        # Test loading with checkpoints that have altered keys
        if not hasattr(self, "alternate_keys_ckpt_paths") or not self.alternate_keys_ckpt_paths:
            return

        for ckpt_path in self.alternate_keys_ckpt_paths:
            backend_empty_cache(torch_device)

            single_file_kwargs = {}
            if hasattr(self, "torch_dtype") and self.torch_dtype:
                single_file_kwargs["torch_dtype"] = self.torch_dtype

            model = self.model_class.from_single_file(ckpt_path, **single_file_kwargs)

            del model
            gc.collect()
            backend_empty_cache(torch_device)


class SDSingleFileTesterMixin:
    single_file_kwargs = {}
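
For orientation, here is a minimal sketch (not part of the diff) of how a concrete model test class consumes SingleFileModelTesterMixin: it only declares class attributes, and the mixin's test_* methods are collected by pytest against them. The attribute values are copied from the Flux test further down in this comparison, except torch_dtype, which is shown purely to illustrate the optional attribute.

import torch

from diffusers import FluxTransformer2DModel

from .single_file_testing_utils import SingleFileModelTesterMixin


class TestFluxTransformer2DModelSingleFileSketch(SingleFileModelTesterMixin):
    # Required: the model class plus the two loading sources being compared.
    model_class = FluxTransformer2DModel
    ckpt_path = "https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/flux1-dev.safetensors"
    repo_id = "black-forest-labs/FLUX.1-dev"

    # Optional: where the model lives inside repo_id when loading with from_pretrained.
    subfolder = "transformer"

    # Optional: checkpoints with renamed keys, exercised by test_checkpoint_altered_keys_loading.
    alternate_keys_ckpt_paths = ["https://huggingface.co/Comfy-Org/flux1-dev/blob/main/flux1-dev-fp8.safetensors"]

    # Optional and illustrative only (the real Flux test does not set it):
    # forwarded to both from_pretrained and from_single_file when present.
    torch_dtype = torch.bfloat16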

View File

@@ -13,26 +13,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import (
    Lumina2Transformer2DModel,
)
from ..testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    require_torch_accelerator,
    torch_device,
)
from .single_file_testing_utils import SingleFileModelTesterMixin
enable_full_determinism()
@require_torch_accelerator
class Lumina2Transformer2DModelSingleFileTests(unittest.TestCase):
class TestLumina2Transformer2DModelSingleFile(SingleFileModelTesterMixin):
model_class = Lumina2Transformer2DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors"
alternate_keys_ckpt_paths = [
@@ -40,34 +35,4 @@ class Lumina2Transformer2DModelSingleFileTests(unittest.TestCase):
]
repo_id = "Alpha-VLLM/Lumina-Image-2.0"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
backend_empty_cache(torch_device)
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
backend_empty_cache(torch_device)
subfolder = "transformer"

View File

@@ -13,8 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
@@ -23,38 +21,24 @@ from diffusers import (
)
from ..testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    load_hf_numpy,
    numpy_cosine_similarity_distance,
    require_torch_accelerator,
    slow,
    torch_device,
)
from .single_file_testing_utils import SingleFileModelTesterMixin
enable_full_determinism()
@slow
@require_torch_accelerator
class AutoencoderDCSingleFileTests(unittest.TestCase):
class TestAutoencoderDCSingleFile(SingleFileModelTesterMixin):
model_class = AutoencoderDC
ckpt_path = "https://huggingface.co/mit-han-lab/dc-ae-f32c32-sana-1.0/blob/main/model.safetensors"
repo_id = "mit-han-lab/dc-ae-f32c32-sana-1.0-diffusers"
main_input_name = "sample"
base_precision = 1e-2
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_file_format(self, seed, shape):
return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
@@ -80,18 +64,6 @@ class AutoencoderDCSingleFileTests(unittest.TestCase):
assert numpy_cosine_similarity_distance(output_slice_1, output_slice_2) < 1e-4
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id)
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
def test_single_file_in_type_variant_components(self):
# `in` variant checkpoints require passing in a `config` parameter
# in order to set the scaling factor correctly.
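
As a hedged sketch of what that comment means in practice (the repo ids below are placeholders, not taken from this diff), an "in" variant checkpoint is loaded by pointing from_single_file at an explicit diffusers config so the scaling factor gets populated:

from diffusers import AutoencoderDC

# Placeholder paths; only the config= pattern is the point being illustrated.
in_variant_ckpt = "https://huggingface.co/<org>/<dc-ae-in-variant>/blob/main/model.safetensors"
model = AutoencoderDC.from_single_file(
    in_variant_ckpt,
    config="<org>/<dc-ae-in-variant-diffusers>",  # diffusers-format repo supplying the scaling factor
)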

View File

@@ -13,8 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
@@ -23,46 +21,19 @@ from diffusers import (
)
from ..testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    require_torch_accelerator,
    slow,
    torch_device,
)
from .single_file_testing_utils import SingleFileModelTesterMixin
enable_full_determinism()
@slow
@require_torch_accelerator
class ControlNetModelSingleFileTests(unittest.TestCase):
class TestControlNetModelSingleFile(SingleFileModelTesterMixin):
model_class = ControlNetModel
ckpt_path = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"
repo_id = "lllyasviel/control_v11p_sd15_canny"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id)
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_single_file_arguments(self):
model_default = self.model_class.from_single_file(self.ckpt_path)

View File

@@ -14,7 +14,6 @@
# limitations under the License.
import gc
import unittest
from diffusers import (
FluxTransformer2DModel,
@@ -23,52 +22,21 @@ from diffusers import (
from ..testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    require_torch_accelerator,
    torch_device,
)
from .single_file_testing_utils import SingleFileModelTesterMixin
enable_full_determinism()
@require_torch_accelerator
class FluxTransformer2DModelSingleFileTests(unittest.TestCase):
class TestFluxTransformer2DModelSingleFile(SingleFileModelTesterMixin):
model_class = FluxTransformer2DModel
ckpt_path = "https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/flux1-dev.safetensors"
alternate_keys_ckpt_paths = ["https://huggingface.co/Comfy-Org/flux1-dev/blob/main/flux1-dev-fp8.safetensors"]
repo_id = "black-forest-labs/FLUX.1-dev"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
backend_empty_cache(torch_device)
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
backend_empty_cache(torch_device)
subfolder = "transformer"
def test_device_map_cuda(self):
backend_empty_cache(torch_device)

View File

@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import (
MotionAdapter,
@@ -27,7 +26,7 @@ from ..testing_utils import (
enable_full_determinism()
class MotionAdapterSingleFileTests(unittest.TestCase):
class MotionAdapterSingleFileTests:
model_class = MotionAdapter
def test_single_file_components_version_v1_5(self):

View File

@@ -14,7 +14,6 @@
# limitations under the License.
import gc
import unittest
import torch
@@ -37,14 +36,12 @@ enable_full_determinism()
@slow
@require_torch_accelerator
class StableCascadeUNetSingleFileTest(unittest.TestCase):
def setUp(self):
super().setUp()
class StableCascadeUNetSingleFileTest:
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)

View File

@@ -13,8 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
@@ -23,22 +21,18 @@ from diffusers import (
)
from ..testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    load_hf_numpy,
    numpy_cosine_similarity_distance,
    require_torch_accelerator,
    slow,
    torch_device,
)
from .single_file_testing_utils import SingleFileModelTesterMixin
enable_full_determinism()
@slow
@require_torch_accelerator
class AutoencoderKLSingleFileTests(unittest.TestCase):
class TestAutoencoderKLSingleFile(SingleFileModelTesterMixin):
model_class = AutoencoderKL
ckpt_path = (
"https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors"
@@ -47,16 +41,6 @@ class AutoencoderKLSingleFileTests(unittest.TestCase):
main_input_name = "sample"
base_precision = 1e-2
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_file_format(self, seed, shape):
return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
@@ -84,18 +68,6 @@ class AutoencoderKLSingleFileTests(unittest.TestCase):
assert numpy_cosine_similarity_distance(output_slice_1, output_slice_2) < 1e-4
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id)
model_single_file = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
def test_single_file_arguments(self):
model_default = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id)

View File

@@ -13,50 +13,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import (
    AutoencoderKLWan,
)
from ..testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    require_torch_accelerator,
    torch_device,
)
from .single_file_testing_utils import SingleFileModelTesterMixin
enable_full_determinism()
@require_torch_accelerator
class AutoencoderKLWanSingleFileTests(unittest.TestCase):
class TestAutoencoderKLWanSingleFile(SingleFileModelTesterMixin):
model_class = AutoencoderKLWan
ckpt_path = (
"https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/vae/wan_2.1_vae.safetensors"
)
repo_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="vae")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
subfolder = "vae"

View File

@@ -13,8 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
@@ -23,72 +21,26 @@ from diffusers import (
)
from ..testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    require_big_accelerator,
    require_torch_accelerator,
    torch_device,
)
from .single_file_testing_utils import SingleFileModelTesterMixin
enable_full_determinism()
@require_torch_accelerator
class WanTransformer3DModelText2VideoSingleFileTest(unittest.TestCase):
class TestWanTransformer3DModelText2VideoSingleFile(SingleFileModelTesterMixin):
model_class = WanTransformer3DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_t2v_1.3B_bf16.safetensors"
repo_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
subfolder = "transformer"
@require_big_accelerator
@require_torch_accelerator
class WanTransformer3DModelImage2VideoSingleFileTest(unittest.TestCase):
class TestWanTransformer3DModelImage2VideoSingleFile(SingleFileModelTesterMixin):
model_class = WanTransformer3DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors"
repo_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
torch_dtype = torch.float8_e4m3fn
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer", torch_dtype=self.torch_dtype)
model_single_file = self.model_class.from_single_file(self.ckpt_path, torch_dtype=self.torch_dtype)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
subfolder = "transformer"

View File

@@ -1,23 +1,17 @@
import gc
import unittest
from diffusers import (
    SanaTransformer2DModel,
)
from ..testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    require_torch_accelerator,
    torch_device,
)
from .single_file_testing_utils import SingleFileModelTesterMixin
enable_full_determinism()
@require_torch_accelerator
class SanaTransformer2DModelSingleFileTests(unittest.TestCase):
class TestSanaTransformer2DModelSingleFile(SingleFileModelTesterMixin):
model_class = SanaTransformer2DModel
ckpt_path = (
"https://huggingface.co/Efficient-Large-Model/Sana_1600M_1024px/blob/main/checkpoints/Sana_1600M_1024px.pth"
@@ -27,34 +21,4 @@ class SanaTransformer2DModelSingleFileTests(unittest.TestCase):
]
repo_id = "Efficient-Large-Model/Sana_1600M_1024px_diffusers"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
backend_empty_cache(torch_device)
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
backend_empty_cache(torch_device)
subfolder = "transformer"

View File

@@ -1,6 +1,5 @@
import gc
import tempfile
import unittest
import torch
@@ -29,7 +28,7 @@ enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionControlNetPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
class TestStableDiffusionControlNetPipelineSingleFileSlow(SDSingleFileTesterMixin):
pipeline_class = StableDiffusionControlNetPipeline
ckpt_path = (
"https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
@@ -39,13 +38,11 @@ class StableDiffusionControlNetPipelineSingleFileSlowTests(unittest.TestCase, SD
)
repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)

View File

@@ -1,7 +1,7 @@
import gc
import tempfile
import unittest
import pytest
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline
@@ -29,19 +29,17 @@ enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionControlNetInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
class TestStableDiffusionControlNetInpaintPipelineSingleFileSlow(SDSingleFileTesterMixin):
pipeline_class = StableDiffusionControlNetInpaintPipeline
ckpt_path = "https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt"
original_config = "https://raw.githubusercontent.com/runwayml/stable-diffusion/main/configs/stable-diffusion/v1-inpainting-inference.yaml"
repo_id = "stable-diffusion-v1-5/stable-diffusion-inpainting"
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)
@@ -115,7 +113,7 @@ class StableDiffusionControlNetInpaintPipelineSingleFileSlowTests(unittest.TestC
super()._compare_component_configs(pipe, pipe_single_file)
@unittest.skip("runwayml original config repo does not exist")
@pytest.mark.skip(reason="runwayml original config repo does not exist")
def test_single_file_components_with_original_config(self):
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", variant="fp16")
pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet)
@@ -125,7 +123,7 @@ class StableDiffusionControlNetInpaintPipelineSingleFileSlowTests(unittest.TestC
super()._compare_component_configs(pipe, pipe_single_file)
@unittest.skip("runwayml original config repo does not exist")
@pytest.mark.skip(reason="runwayml original config repo does not exist")
def test_single_file_components_with_original_config_local_files_only(self):
controlnet = ControlNetModel.from_pretrained(
"lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16"

View File

@@ -1,6 +1,5 @@
import gc
import tempfile
import unittest
import torch
@@ -29,7 +28,7 @@ enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionControlNetPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
class TestStableDiffusionControlNetPipelineSingleFileSlow(SDSingleFileTesterMixin):
pipeline_class = StableDiffusionControlNetPipeline
ckpt_path = (
"https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
@@ -39,13 +38,11 @@ class StableDiffusionControlNetPipelineSingleFileSlowTests(unittest.TestCase, SD
)
repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)

View File

@@ -1,5 +1,4 @@
import gc
import unittest
import torch
@@ -23,7 +22,7 @@ enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
class TestStableDiffusionImg2ImgPipelineSingleFileSlow(SDSingleFileTesterMixin):
pipeline_class = StableDiffusionImg2ImgPipeline
ckpt_path = (
"https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
@@ -33,13 +32,11 @@ class StableDiffusionImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSin
)
repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)
@@ -66,19 +63,17 @@ class StableDiffusionImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSin
@slow
@require_torch_accelerator
class StableDiffusion21Img2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
class TestStableDiffusion21Img2ImgPipelineSingleFileSlow(SDSingleFileTesterMixin):
pipeline_class = StableDiffusionImg2ImgPipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors"
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml"
repo_id = "stabilityai/stable-diffusion-2-1"
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)

View File

@@ -1,6 +1,6 @@
import gc
import unittest
import pytest
import torch
from diffusers import (
@@ -23,19 +23,17 @@ enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
class TestStableDiffusionInpaintPipelineSingleFileSlow(SDSingleFileTesterMixin):
pipeline_class = StableDiffusionInpaintPipeline
ckpt_path = "https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt"
original_config = "https://raw.githubusercontent.com/runwayml/stable-diffusion/main/configs/stable-diffusion/v1-inpainting-inference.yaml"
repo_id = "botp/stable-diffusion-v1-5-inpainting"
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)
@@ -70,18 +68,18 @@ class StableDiffusionInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSin
assert pipe.unet.config.in_channels == 4
@unittest.skip("runwayml original config has been removed")
@pytest.mark.skip(reason="runwayml original config has been removed")
def test_single_file_components_with_original_config(self):
return
@unittest.skip("runwayml original config has been removed")
@pytest.mark.skip(reason="runwayml original config has been removed")
def test_single_file_components_with_original_config_local_files_only(self):
return
@slow
@require_torch_accelerator
class StableDiffusion21InpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
class TestStableDiffusion21InpaintPipelineSingleFileSlow(SDSingleFileTesterMixin):
pipeline_class = StableDiffusionInpaintPipeline
ckpt_path = (
"https://huggingface.co/stabilityai/stable-diffusion-2-inpainting/blob/main/512-inpainting-ema.safetensors"
@@ -89,13 +87,11 @@ class StableDiffusion21InpaintPipelineSingleFileSlowTests(unittest.TestCase, SDS
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inpainting-inference.yaml"
repo_id = "stabilityai/stable-diffusion-2-inpainting"
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)

View File

@@ -1,6 +1,5 @@
import gc
import tempfile
import unittest
import torch
@@ -28,7 +27,7 @@ enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
class TestStableDiffusionPipelineSingleFileSlow(SDSingleFileTesterMixin):
pipeline_class = StableDiffusionPipeline
ckpt_path = (
"https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
@@ -38,13 +37,11 @@ class StableDiffusionPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFile
)
repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)
@@ -90,19 +87,17 @@ class StableDiffusionPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFile
@slow
class StableDiffusion21PipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
class TestStableDiffusion21PipelineSingleFileSlow(SDSingleFileTesterMixin):
pipeline_class = StableDiffusionPipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors"
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml"
repo_id = "stabilityai/stable-diffusion-2-1"
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)
@@ -125,7 +120,7 @@ class StableDiffusion21PipelineSingleFileSlowTests(unittest.TestCase, SDSingleFi
@nightly
@slow
@require_torch_accelerator
class StableDiffusionInstructPix2PixPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
class TestStableDiffusionInstructPix2PixPipelineSingleFileSlow(SDSingleFileTesterMixin):
pipeline_class = StableDiffusionInstructPix2PixPipeline
ckpt_path = "https://huggingface.co/timbrooks/instruct-pix2pix/blob/main/instruct-pix2pix-00-22000.safetensors"
original_config = (
@@ -134,13 +129,11 @@ class StableDiffusionInstructPix2PixPipelineSingleFileSlowTests(unittest.TestCas
repo_id = "timbrooks/instruct-pix2pix"
single_file_kwargs = {"extract_ema": True}
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)

View File

@@ -1,5 +1,4 @@
import gc
import unittest
import pytest
import torch
@@ -25,19 +24,17 @@ enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionUpscalePipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
class TestStableDiffusionUpscalePipelineSingleFileSlow(SDSingleFileTesterMixin):
pipeline_class = StableDiffusionUpscalePipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler/blob/main/x4-upscaler-ema.safetensors"
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml"
repo_id = "stabilityai/stable-diffusion-x4-upscaler"
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)

View File

@@ -1,6 +1,5 @@
import gc
import tempfile
import unittest
import torch
@@ -32,7 +31,7 @@ enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionXLAdapterPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin):
class TestStableDiffusionXLAdapterPipelineSingleFileSlow(SDXLSingleFileTesterMixin):
pipeline_class = StableDiffusionXLAdapterPipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
repo_id = "stabilityai/stable-diffusion-xl-base-1.0"
@@ -40,13 +39,11 @@ class StableDiffusionXLAdapterPipelineSingleFileSlowTests(unittest.TestCase, SDX
"https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
)
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)

View File

@@ -1,6 +1,5 @@
import gc
import tempfile
import unittest
import torch
@@ -28,7 +27,7 @@ enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionXLControlNetPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin):
class TestStableDiffusionXLControlNetPipelineSingleFileSlow(SDXLSingleFileTesterMixin):
pipeline_class = StableDiffusionXLControlNetPipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
repo_id = "stabilityai/stable-diffusion-xl-base-1.0"
@@ -36,13 +35,11 @@ class StableDiffusionXLControlNetPipelineSingleFileSlowTests(unittest.TestCase,
"https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
)
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)

View File

@@ -1,5 +1,4 @@
import gc
import unittest
import torch
@@ -25,7 +24,7 @@ enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionXLImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin):
class TestStableDiffusionXLImg2ImgPipelineSingleFileSlow(SDXLSingleFileTesterMixin):
pipeline_class = StableDiffusionXLImg2ImgPipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
repo_id = "stabilityai/stable-diffusion-xl-base-1.0"
@@ -33,13 +32,11 @@ class StableDiffusionXLImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDX
"https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
)
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)
@@ -66,7 +63,7 @@ class StableDiffusionXLImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDX
@slow
@require_torch_accelerator
class StableDiffusionXLImg2ImgRefinerPipelineSingleFileSlowTests(unittest.TestCase):
class StableDiffusionXLImg2ImgRefinerPipelineSingleFileSlowTests:
pipeline_class = StableDiffusionXLImg2ImgPipeline
ckpt_path = (
"https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/blob/main/sd_xl_refiner_1.0.safetensors"

View File

@@ -1,5 +1,4 @@
import gc
import unittest
import torch
@@ -19,19 +18,17 @@ enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionXLInstructPix2PixPipeline(unittest.TestCase):
class StableDiffusionXLInstructPix2PixPipeline:
pipeline_class = StableDiffusionXLInstructPix2PixPipeline
ckpt_path = "https://huggingface.co/stabilityai/cosxl/blob/main/cosxl_edit.safetensors"
original_config = None
repo_id = "diffusers/sdxl-instructpix2pix-768"
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)

View File

@@ -1,5 +1,4 @@
import gc
import unittest
import torch
@@ -22,7 +21,7 @@ enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionXLPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin):
class TestStableDiffusionXLPipelineSingleFileSlow(SDXLSingleFileTesterMixin):
pipeline_class = StableDiffusionXLPipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
repo_id = "stabilityai/stable-diffusion-xl-base-1.0"
@@ -30,13 +29,11 @@ class StableDiffusionXLPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingle
"https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
)
def setUp(self):
super().setUp()
def setup_method(self):
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
def teardown_method(self):
gc.collect()
backend_empty_cache(torch_device)