Compare commits


2 Commits

Author | SHA1 | Message | Date
Sayak Paul | bd9ee5f94f | Merge branch 'main' into disable-xformers-tests-selected-pipelines | 2025-09-12 15:46:01 +05:30
sayakpaul | de07e5a8ec | disable xformers tests for pipelines where it isn't popular. | 2025-09-03 14:19:14 +05:30
9 changed files with 16 additions and 133 deletions

View File

@@ -17,11 +17,10 @@ import torch
 import torch.nn.functional as F
 from torch import nn
-from ..utils import deprecate, get_logger, is_torch_npu_available, is_torch_version
+from ..utils import deprecate
+from ..utils.import_utils import is_torch_npu_available, is_torch_version
-logger = get_logger(__name__)
 if is_torch_npu_available():
     import torch_npu
@@ -32,7 +31,6 @@ ACT2CLS = {
"gelu": nn.GELU,
"relu": nn.ReLU,
}
KERNELS_REPO_ID = "kernels-community/activation"
def get_activation(act_fn: str) -> nn.Module:
@@ -92,27 +90,6 @@ class GELU(nn.Module):
         return hidden_states
-# TODO: validation checks / consider making Python classes of activations like `transformers`
-# All of these are temporary for now.
-class CUDAOptimizedGELU(GELU):
-    def __init__(self, *args, **kwargs):
-        from kernels import get_kernel
-        activation = get_kernel("kernels-community/activation", revision="add_more_act")
-        approximate = kwargs.get("approximate", "none")
-        super().__init__(*args, **kwargs)
-        if approximate == "none":
-            self.act_fn = activation.layers.Gelu()
-        elif approximate == "tanh":
-            self.act_fn = activation.layers.GeluTanh()
-    def forward(self, hidden_states):
-        hidden_states = self.proj(hidden_states)
-        hidden_states = self.act_fn(hidden_states)
-        return hidden_states
 class GEGLU(nn.Module):
     r"""
     A [variant](https://huggingface.co/papers/2002.05202) of the gated linear unit activation function.

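Note: the deleted CUDAOptimizedGELU above relied on the `kernels` package's Hub-loading pattern. A minimal sketch of that pattern on its own, assuming `kernels` is installed and the `kernels-community/activation` repo referenced in the deleted code is reachable:

from kernels import get_kernel

# Fetch the activation kernel set from the Hugging Face Hub, pinned to the
# same revision the deleted code used.
activation = get_kernel("kernels-community/activation", revision="add_more_act")

gelu = activation.layers.Gelu()           # exact (erf) GELU
gelu_tanh = activation.layers.GeluTanh()  # tanh-approximate GELU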
View File

@@ -20,20 +20,11 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from ..utils import is_kernels_available, is_torch_npu_available, is_torch_version
-from ..utils.constants import DIFFUSERS_ENABLE_HUB_KERNELS
-from ..utils.kernels_utils import use_kernel_forward_from_hub
+from ..utils import is_torch_npu_available, is_torch_version
 from .activations import get_activation
 from .embeddings import CombinedTimestepLabelEmbeddings, PixArtAlphaCombinedTimestepSizeEmbeddings
-if is_kernels_available() and DIFFUSERS_ENABLE_HUB_KERNELS:
-    from kernels import get_kernel
-    activation = get_kernel("kernels-community/activation", revision="add_more_act")
-    silu_kernel = activation.layers.Silu
class AdaLayerNorm(nn.Module):
r"""
Norm layer modified to incorporate timestep embeddings.
@@ -66,10 +57,7 @@ class AdaLayerNorm(nn.Module):
         else:
             self.emb = None
-        if DIFFUSERS_ENABLE_HUB_KERNELS:
-            self.silu = silu_kernel()
-        else:
-            self.silu = nn.SiLU()
+        self.silu = nn.SiLU()
         self.linear = nn.Linear(embedding_dim, output_dim)
         self.norm = nn.LayerNorm(output_dim // 2, norm_eps, norm_elementwise_affine)
@@ -156,10 +144,7 @@ class AdaLayerNormZero(nn.Module):
         else:
             self.emb = None
-        if DIFFUSERS_ENABLE_HUB_KERNELS:
-            self.silu = silu_kernel()
-        else:
-            self.silu = nn.SiLU()
+        self.silu = nn.SiLU()
         self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=bias)
         if norm_type == "layer_norm":
             self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
@@ -198,10 +183,7 @@ class AdaLayerNormZeroSingle(nn.Module):
     def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True):
         super().__init__()
-        if DIFFUSERS_ENABLE_HUB_KERNELS:
-            self.silu = silu_kernel()
-        else:
-            self.silu = nn.SiLU()
+        self.silu = nn.SiLU()
         self.linear = nn.Linear(embedding_dim, 3 * embedding_dim, bias=bias)
         if norm_type == "layer_norm":
             self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
@@ -353,10 +335,7 @@ class AdaLayerNormContinuous(nn.Module):
norm_type="layer_norm",
):
super().__init__()
if DIFFUSERS_ENABLE_HUB_KERNELS:
self.silu = silu_kernel()
else:
self.silu = nn.SiLU()
self.silu = nn.SiLU()
self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias)
if norm_type == "layer_norm":
self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias)
@@ -529,7 +508,6 @@ else:
             return F.layer_norm(input, self.dim, self.weight, self.bias, self.eps)
-@use_kernel_forward_from_hub("RMSNorm")
 class RMSNorm(nn.Module):
     r"""
     RMS Norm as introduced in https://huggingface.co/papers/1910.07467 by Zhang et al.

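For reference, RMS normalization (the paper linked above) rescales by the root mean square of the features instead of centering and scaling like LayerNorm. A minimal plain-PyTorch sketch, independent of the diffusers class:

import torch

class SimpleRMSNorm(torch.nn.Module):
    # Minimal RMSNorm reference (Zhang & Sennrich, 2019).
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = torch.nn.Parameter(torch.ones(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Normalize by the root-mean-square over the last dimension.
        rms = torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)
        return x * rms * self.weight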
View File

@@ -22,8 +22,7 @@ import torch.nn.functional as F
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...loaders import FluxTransformer2DLoadersMixin, FromOriginalModelMixin, PeftAdapterMixin
-from ...utils import USE_PEFT_BACKEND, is_kernels_available, logging, scale_lora_layers, unscale_lora_layers
-from ...utils.constants import DIFFUSERS_ENABLE_HUB_KERNELS
+from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
 from ...utils.torch_utils import maybe_allow_in_graph
 from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward
 from ..attention_dispatch import dispatch_attention_fn
@@ -41,12 +40,6 @@ from ..normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNo
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-if is_kernels_available() and DIFFUSERS_ENABLE_HUB_KERNELS:
-    from kernels import get_kernel
-    activation = get_kernel("kernels-community/activation", revision="add_more_act")
-    gelu_tanh_kernel = activation.layers.GeluTanh
 def _get_projections(attn: "FluxAttention", hidden_states, encoder_hidden_states=None):
     query = attn.to_q(hidden_states)
@@ -307,14 +300,8 @@ class FluxAttention(torch.nn.Module, AttentionModuleMixin):
         self.added_kv_proj_dim = added_kv_proj_dim
         self.added_proj_bias = added_proj_bias
-        if DIFFUSERS_ENABLE_HUB_KERNELS:
-            from ..normalization import RMSNorm
-            self.norm_q = RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
-            self.norm_k = RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
-        else:
-            self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
-            self.norm_k = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
+        self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
+        self.norm_k = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
         self.to_q = torch.nn.Linear(query_dim, self.inner_dim, bias=bias)
         self.to_k = torch.nn.Linear(query_dim, self.inner_dim, bias=bias)
         self.to_v = torch.nn.Linear(query_dim, self.inner_dim, bias=bias)
@@ -325,14 +312,8 @@ class FluxAttention(torch.nn.Module, AttentionModuleMixin):
             self.to_out.append(torch.nn.Dropout(dropout))
         if added_kv_proj_dim is not None:
-            if DIFFUSERS_ENABLE_HUB_KERNELS:
-                from ..normalization import RMSNorm
-                self.norm_added_q = RMSNorm(dim_head, eps=eps)
-                self.norm_added_k = RMSNorm(dim_head, eps=eps)
-            else:
-                self.norm_added_q = torch.nn.RMSNorm(dim_head, eps=eps)
-                self.norm_added_k = torch.nn.RMSNorm(dim_head, eps=eps)
+            self.norm_added_q = torch.nn.RMSNorm(dim_head, eps=eps)
+            self.norm_added_k = torch.nn.RMSNorm(dim_head, eps=eps)
             self.add_q_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias)
             self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias)
             self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias)
@@ -370,11 +351,6 @@ class FluxSingleTransformerBlock(nn.Module):
         self.norm = AdaLayerNormZeroSingle(dim)
         self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim)
         self.act_mlp = nn.GELU(approximate="tanh")
-        # if not DIFFUSERS_ENABLE_HUB_KERNELS:
-        #     self.act_mlp = nn.GELU(approximate="tanh")
-        # else:
-        #     self.act_mlp = gelu_tanh_kernel()
         self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim)
         self.attn = FluxAttention(

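With the Hub-kernel gating gone, FluxAttention always builds PyTorch's built-in torch.nn.RMSNorm (available since torch 2.4). A quick shape check matching the call sites above, with dim_head=128 and the other sizes chosen only for illustration:

import torch

norm_q = torch.nn.RMSNorm(128, eps=1e-6, elementwise_affine=True)
query = torch.randn(2, 24, 512, 128)  # (batch, heads, tokens, dim_head); sizes are made up
print(norm_q(query).shape)  # torch.Size([2, 24, 512, 128])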
View File

@@ -505,13 +505,6 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
             os.environ["PT_HPU_MAX_COMPOUND_OP_SIZE"] = "1"
             logger.debug("Environment variable set: PT_HPU_MAX_COMPOUND_OP_SIZE=1")
-            if dtype in (torch.bfloat16, None) and kwargs.pop("sdp_on_bf16", True):
-                if hasattr(torch._C, "_set_math_sdp_allow_fp16_bf16_reduction"):
-                    torch._C._set_math_sdp_allow_fp16_bf16_reduction(True)
-                    logger.warning(
-                        "Enabled SDP with BF16 precision on HPU. To disable, please use `.to('hpu', sdp_on_bf16=False)`"
-                    )
         module_names, _ = self._get_signature_keys(self)
         modules = [getattr(self, n, None) for n in module_names]
         modules = [m for m in modules if isinstance(m, torch.nn.Module)]

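Before this deletion, moving a pipeline to HPU with bf16 flipped a private math-SDP switch unless the caller opted out; the opt-out named in the removed warning would have looked like the sketch below (model id hypothetical):

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("some/model-id", torch_dtype=torch.bfloat16)
# `sdp_on_bf16` was consumed via kwargs.pop() in the deleted block, so this
# keyword only had an effect before the change.
pipe.to("hpu", sdp_on_bf16=False)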
View File

@@ -1,5 +1,3 @@
-from typing import Union
 from ..utils import get_logger
 from .import_utils import is_kernels_available
@@ -23,42 +21,3 @@ def _get_fa3_from_hub():
     except Exception as e:
         logger.error(f"An error occurred while fetching kernel '{_DEFAULT_HUB_ID_FA3}' from the Hub: {e}")
         raise
-if is_kernels_available():
-    from kernels import (
-        Device,
-        LayerRepository,
-        register_kernel_mapping,
-        replace_kernel_forward_from_hub,
-        use_kernel_forward_from_hub,
-    )
-    _KERNEL_MAPPING: dict[str, dict[Union[Device, str], LayerRepository]] = {
-        "RMSNorm": {
-            "cuda": LayerRepository(repo_id="kernels-community/liger_kernels", layer_name="LigerRMSNorm"),
-        },
-    }
-    register_kernel_mapping(_KERNEL_MAPPING)
-else:
-    # Stub to make decorators in transformers work when `kernels`
-    # is not installed.
-    def use_kernel_forward_from_hub(*args, **kwargs):
-        def decorator(cls):
-            return cls
-        return decorator
-    class LayerRepository:
-        def __init__(self, *args, **kwargs):
-            raise RuntimeError("LayerRepository requires `kernels` to be installed. Run `pip install kernels`.")
-    def replace_kernel_forward_from_hub(*args, **kwargs):
-        raise RuntimeError(
-            "replace_kernel_forward_from_hub requires `kernels` to be installed. Run `pip install kernels`."
-        )
-    def register_kernel_mapping(*args, **kwargs):
-        raise RuntimeError("register_kernel_mapping requires `kernels` to be installed. Run `pip install kernels`.")

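The deleted fallback stubs rely on the standard decorator-factory pattern: use_kernel_forward_from_hub(...) is called with arguments, so it must return the actual decorator, which in the no-kernels case hands the class back unchanged. A self-contained illustration:

def use_kernel_forward_from_hub(*args, **kwargs):
    # The outer call absorbs the decorator arguments (e.g. "RMSNorm");
    # the inner function receives the class and returns it untouched.
    def decorator(cls):
        return cls

    return decorator

@use_kernel_forward_from_hub("RMSNorm")
class MyNorm:
    pass

assert MyNorm.__name__ == "MyNorm"  # the class passes through unmodified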
View File

@@ -48,6 +48,7 @@ class EasyAnimatePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
     image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
     image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
+    test_xformers_attention = False
     required_optional_params = frozenset(
         [
             "num_inference_steps",

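Setting test_xformers_attention = False (here and in the test files below) opts a pipeline's fast tests out of the shared xFormers check. A hedged sketch of how such a class-level flag typically gates a mixin test; the real PipelineTesterMixin implementation is not part of this diff, and the method name below is illustrative:

import unittest

class ExamplePipelineTests(unittest.TestCase):
    # Pipeline test classes flip this to False to opt out.
    test_xformers_attention = True

    def test_xformers_attention_forward_pass(self):
        if not self.test_xformers_attention:
            self.skipTest("xFormers attention is not tested for this pipeline.")
        # ... enable xFormers on the pipeline and compare outputs ...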
View File

@@ -47,8 +47,8 @@ class HiDreamImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
     image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
     image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
     required_optional_params = PipelineTesterMixin.required_optional_params
+    test_xformers_attention = False
     test_layerwise_casting = True
     supports_dduf = False

View File

@@ -22,7 +22,7 @@ class OmniGenPipelineFastTests(unittest.TestCase, PipelineTesterMixin):
     pipeline_class = OmniGenPipeline
     params = frozenset(["prompt", "guidance_scale"])
     batch_params = frozenset(["prompt"])
+    test_xformers_attention = False
     test_layerwise_casting = True
     def get_dummy_components(self):

View File

@@ -44,7 +44,6 @@ class QwenControlNetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
batch_params = frozenset(["prompt", "negative_prompt", "control_image"])
image_params = frozenset(["control_image"])
image_latents_params = frozenset(["latents"])
required_optional_params = frozenset(
[
"num_inference_steps",
@@ -59,7 +58,7 @@ class QwenControlNetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     )
     supports_dduf = False
-    test_xformers_attention = True
+    test_xformers_attention = False
     test_layerwise_casting = True
     test_group_offloading = True