Compare commits

...

2 Commits

Author SHA1 Message Date
DN6
e48cfea91e update 2025-06-27 15:12:58 +05:30
DN6
1d2ea36678 update 2025-06-24 08:29:18 +05:30
13 changed files with 9 additions and 23 deletions

View File

@@ -248,7 +248,7 @@ jobs:
           BIG_GPU_MEMORY: 40
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-            -m "big_gpu_with_torch_cuda" \
+            -m "big_accelerator" \
             --make-reports=tests_big_gpu_torch_cuda \
             --report-log=tests_big_gpu_torch_cuda.log \
             tests/

View File

@@ -421,6 +421,10 @@ def require_big_accelerator(test_case):
     Decorator marking a test that requires a bigger hardware accelerator (24GB) for execution. Some example pipelines:
     Flux, SD3, Cog, etc.
     """
+    import pytest
+
+    test_case = pytest.mark.big_accelerator(test_case)
+
     if not is_torch_available():
         return unittest.skip("test requires PyTorch")(test_case)

View File

@@ -30,6 +30,10 @@ sys.path.insert(1, git_repo_path)
 warnings.simplefilter(action="ignore", category=FutureWarning)


+def pytest_configure(config):
+    config.addinivalue_line("markers", "big_accelerator: marks tests as requiring big accelerator resources")
+
+
 def pytest_addoption(parser):
     from diffusers.utils.testing_utils import pytest_addoption_shared

View File

@@ -20,7 +20,6 @@ import tempfile
 import unittest

 import numpy as np
-import pytest
 import safetensors.torch
 import torch
 from parameterized import parameterized

@@ -813,7 +812,6 @@ class FluxControlLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxLoRAIntegrationTests(unittest.TestCase):
     """internal note: The integration slices were obtained on audace.

@@ -960,7 +958,6 @@ class FluxLoRAIntegrationTests(unittest.TestCase):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxControlLoRAIntegrationTests(unittest.TestCase):
     num_inference_steps = 10
     seed = 0
seed = 0 seed = 0

View File

@@ -17,7 +17,6 @@ import sys
 import unittest

 import numpy as np
-import pytest
 import torch
 from transformers import CLIPTextModel, CLIPTokenizer, LlamaModel, LlamaTokenizerFast

@@ -198,7 +197,6 @@ class HunyuanVideoLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class HunyuanVideoLoRAIntegrationTests(unittest.TestCase):
     """internal note: The integration slices were obtained on DGX.

View File

@@ -17,7 +17,6 @@ import sys
 import unittest

 import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel

@@ -139,7 +138,6 @@ class SD3LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class SD3LoraIntegrationTests(unittest.TestCase):
     pipeline_class = StableDiffusion3Img2ImgPipeline
     repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"

View File

@@ -17,7 +17,6 @@ import gc
 import unittest

 import numpy as np
-import pytest
 import torch
 from huggingface_hub import hf_hub_download
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast

@@ -211,7 +210,6 @@ class FluxControlNetPipelineFastTests(unittest.TestCase, PipelineTesterMixin, Fl
 @nightly
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxControlNetPipelineSlowTests(unittest.TestCase):
     pipeline_class = FluxControlNetPipeline

View File

@@ -18,7 +18,6 @@ import unittest
 from typing import Optional

 import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel

@@ -221,7 +220,6 @@ class StableDiffusion3ControlNetPipelineFastTests(unittest.TestCase, PipelineTes
 @slow
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class StableDiffusion3ControlNetPipelineSlowTests(unittest.TestCase):
     pipeline_class = StableDiffusion3ControlNetPipeline

View File

@@ -2,7 +2,6 @@ import gc
 import unittest

 import numpy as np
-import pytest
 import torch
 from huggingface_hub import hf_hub_download
 from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel

@@ -224,7 +223,6 @@ class FluxPipelineFastTests(
 @nightly
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxPipelineSlowTests(unittest.TestCase):
     pipeline_class = FluxPipeline
     repo_id = "black-forest-labs/FLUX.1-schnell"

@@ -312,7 +310,6 @@ class FluxPipelineSlowTests(unittest.TestCase):
 @slow
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxIPAdapterPipelineSlowTests(unittest.TestCase):
     pipeline_class = FluxPipeline
     repo_id = "black-forest-labs/FLUX.1-dev"

View File

@@ -2,7 +2,6 @@ import gc
 import unittest

 import numpy as np
-import pytest
 import torch

 from diffusers import FluxPipeline, FluxPriorReduxPipeline

@@ -19,7 +18,6 @@ from diffusers.utils.testing_utils import (
 @slow
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxReduxSlowTests(unittest.TestCase):
     pipeline_class = FluxPriorReduxPipeline
     repo_id = "black-forest-labs/FLUX.1-Redux-dev"

View File

@@ -17,7 +17,6 @@ import inspect
 import unittest

 import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, T5EncoderModel

@@ -268,7 +267,6 @@ class MochiPipelineFastTests(PipelineTesterMixin, FasterCacheTesterMixin, unitte
 @nightly
 @require_torch_accelerator
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class MochiPipelineIntegrationTests(unittest.TestCase):
     prompt = "A painting of a squirrel eating a burger."

View File

@@ -2,7 +2,6 @@ import gc
 import unittest

 import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel

@@ -233,7 +232,6 @@ class StableDiffusion3PipelineFastTests(unittest.TestCase, PipelineTesterMixin):
 @slow
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class StableDiffusion3PipelineSlowTests(unittest.TestCase):
     pipeline_class = StableDiffusion3Pipeline
     repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"

View File

@@ -3,7 +3,6 @@ import random
 import unittest

 import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel

@@ -168,7 +167,6 @@ class StableDiffusion3Img2ImgPipelineFastTests(PipelineLatentTesterMixin, unitte
 @slow
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class StableDiffusion3Img2ImgPipelineSlowTests(unittest.TestCase):
     pipeline_class = StableDiffusion3Img2ImgPipeline
     repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"