Compare commits

..

12 Commits

Author SHA1 Message Date
DN6
1d75ab7e35 update 2026-03-17 23:23:01 +05:30
DN6
b086b6da0a update 2026-03-17 15:40:27 +05:30
DN6
28c7516229 update 2026-03-17 15:38:03 +05:30
DN6
53b9b56059 update 2026-03-17 15:33:17 +05:30
DN6
65579667e9 update 2026-03-17 15:31:30 +05:30
DN6
cda1c36eeb update 2026-03-17 13:41:49 +05:30
DN6
f634485333 Merge branch 'main' into make-tiny-model 2026-03-17 13:37:02 +05:30
DN6
7820980959 update 2026-03-17 13:36:49 +05:30
Sayak Paul
16e7067647 [tests] fix llava kwargs in the hunyuan tests (#13275)
fix llava kwargs in the hunyuan tests
2026-03-17 10:11:47 +05:30
Dhruv Nair
d1b3555c29 [Modular] Fix dtype assignment when type hint is AutoModel (#13271)
* update

* update
2026-03-17 09:47:53 +05:30
Wang, Yi
9677859ebf fix parallelism case failure in xpu (#13270)
* fix parallelism case failure in xpu

Signed-off-by: Wang, Yi <yi.a.wang@intel.com>

* updated

Signed-off-by: Wang, Yi <yi.a.wang@intel.com>

---------

Signed-off-by: Wang, Yi <yi.a.wang@intel.com>
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
2026-03-17 08:52:15 +05:30
Steven Liu
ed31974c3e [docs] updates (#13248)
* fixes

* few more links

* update zh

* fix
2026-03-16 13:24:57 -07:00
13 changed files with 249 additions and 156 deletions

View File

@@ -22,6 +22,8 @@
title: Reproducibility
- local: using-diffusers/schedulers
title: Schedulers
- local: using-diffusers/guiders
title: Guiders
- local: using-diffusers/automodel
title: AutoModel
- local: using-diffusers/other-formats
@@ -110,8 +112,6 @@
title: ModularPipeline
- local: modular_diffusers/components_manager
title: ComponentsManager
- local: modular_diffusers/guiders
title: Guiders
- local: modular_diffusers/custom_blocks
title: Building Custom Blocks
- local: modular_diffusers/mellon

View File

@@ -99,7 +99,7 @@ To update guider configuration, you can run `pipe.guider = pipe.guider.new(...)`
pipe.guider = pipe.guider.new(guidance_scale=5.0)
```
Read more on Guider [here](../../modular_diffusers/guiders).
Read more on Guider [here](../../using-diffusers/guiders).

View File

@@ -30,7 +30,7 @@ HunyuanImage-2.1 comes in the following variants:
## HunyuanImage-2.1
HunyuanImage-2.1 applies [Adaptive Projected Guidance (APG)](https://huggingface.co/papers/2410.02416) combined with Classifier-Free Guidance (CFG) in the denoising loop. `HunyuanImagePipeline` has a `guider` component (read more about [Guider](../modular_diffusers/guiders.md)) and does not take a `guidance_scale` parameter at runtime. To change guider-related parameters, e.g., `guidance_scale`, you can update the `guider` configuration instead.
HunyuanImage-2.1 applies [Adaptive Projected Guidance (APG)](https://huggingface.co/papers/2410.02416) combined with Classifier-Free Guidance (CFG) in the denoising loop. `HunyuanImagePipeline` has a `guider` component (read more about [Guider](../../using-diffusers/guiders)) and does not take a `guidance_scale` parameter at runtime. To change guider-related parameters, e.g., `guidance_scale`, you can update the `guider` configuration instead.
```python
import torch

View File

@@ -338,7 +338,7 @@ guider = ClassifierFreeGuidance(guidance_scale=5.0)
pipeline.update_components(guider=guider)
```
See the [Guiders](./guiders) guide for more details on available guiders and how to configure them.
See the [Guiders](../using-diffusers/guiders) guide for more details on available guiders and how to configure them.
## Splitting a pipeline into stages

View File

@@ -39,7 +39,7 @@ The Modular Diffusers docs are organized as shown below.
- [ModularPipeline](./modular_pipeline) shows you how to create and convert pipeline blocks into an executable [`ModularPipeline`].
- [ComponentsManager](./components_manager) shows you how to manage and reuse components across multiple pipelines.
- [Guiders](./guiders) shows you how to use different guidance methods in the pipeline.
- [Guiders](../using-diffusers/guiders) shows you how to use different guidance methods in the pipeline.
## Mellon Integration

View File

@@ -482,144 +482,6 @@ print(
) # (2880, 1, 960, 320) having a stride of 1 for the 2nd dimension proves that it works
```
## torch.jit.trace
[torch.jit.trace](https://pytorch.org/docs/stable/generated/torch.jit.trace.html) records the operations a model performs on a sample input and creates a new, optimized representation of the model based on the recorded execution path. During tracing, the model is optimized to reduce overhead from Python and dynamic control flows and operations are fused together for more efficiency. The returned executable or [ScriptFunction](https://pytorch.org/docs/stable/generated/torch.jit.ScriptFunction.html) can be compiled.
```py
import time
import torch
from diffusers import StableDiffusionPipeline
import functools
# torch disable grad
torch.set_grad_enabled(False)
# set variables
n_experiments = 2
unet_runs_per_experiment = 50
# load sample inputs
def generate_inputs():
sample = torch.randn((2, 4, 64, 64), device="cuda", dtype=torch.float16)
timestep = torch.rand(1, device="cuda", dtype=torch.float16) * 999
encoder_hidden_states = torch.randn((2, 77, 768), device="cuda", dtype=torch.float16)
return sample, timestep, encoder_hidden_states
pipeline = StableDiffusionPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5",
torch_dtype=torch.float16,
use_safetensors=True,
).to("cuda")
unet = pipeline.unet
unet.eval()
unet.to(memory_format=torch.channels_last) # use channels_last memory format
unet.forward = functools.partial(unet.forward, return_dict=False) # set return_dict=False as default
# warmup
for _ in range(3):
with torch.inference_mode():
inputs = generate_inputs()
orig_output = unet(*inputs)
# trace
print("tracing..")
unet_traced = torch.jit.trace(unet, inputs)
unet_traced.eval()
print("done tracing")
# warmup and optimize graph
for _ in range(5):
with torch.inference_mode():
inputs = generate_inputs()
orig_output = unet_traced(*inputs)
# benchmarking
with torch.inference_mode():
for _ in range(n_experiments):
torch.cuda.synchronize()
start_time = time.time()
for _ in range(unet_runs_per_experiment):
orig_output = unet_traced(*inputs)
torch.cuda.synchronize()
print(f"unet traced inference took {time.time() - start_time:.2f} seconds")
for _ in range(n_experiments):
torch.cuda.synchronize()
start_time = time.time()
for _ in range(unet_runs_per_experiment):
orig_output = unet(*inputs)
torch.cuda.synchronize()
print(f"unet inference took {time.time() - start_time:.2f} seconds")
# save the model
unet_traced.save("unet_traced.pt")
```
Replace the pipeline's UNet with the traced version.
```py
import torch
from diffusers import StableDiffusionPipeline
from dataclasses import dataclass
@dataclass
class UNet2DConditionOutput:
sample: torch.Tensor
pipeline = StableDiffusionPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5",
torch_dtype=torch.float16,
use_safetensors=True,
).to("cuda")
# use jitted unet
unet_traced = torch.jit.load("unet_traced.pt")
# del pipeline.unet
class TracedUNet(torch.nn.Module):
def __init__(self):
super().__init__()
self.in_channels = pipeline.unet.config.in_channels
self.device = pipeline.unet.device
def forward(self, latent_model_input, t, encoder_hidden_states):
sample = unet_traced(latent_model_input, t, encoder_hidden_states)[0]
return UNet2DConditionOutput(sample=sample)
pipeline.unet = TracedUNet()
with torch.inference_mode():
image = pipeline([prompt] * 1, num_inference_steps=50).images[0]
```
## Memory-efficient attention
> [!TIP]
> Memory-efficient attention optimizes for memory usage *and* [inference speed](./fp16#scaled-dot-product-attention)!
The Transformers attention mechanism is memory-intensive, especially for long sequences, so you can try using different and more memory-efficient attention types.
By default, if PyTorch >= 2.0 is installed, [scaled dot-product attention (SDPA)](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) is used. You don't need to make any additional changes to your code.
SDPA supports [FlashAttention](https://github.com/Dao-AILab/flash-attention) and [xFormers](https://github.com/facebookresearch/xformers) as well as a native C++ PyTorch implementation. It automatically selects the most optimal implementation based on your input.
You can explicitly use xFormers with the [`~ModelMixin.enable_xformers_memory_efficient_attention`] method.
```py
# pip install xformers
import torch
from diffusers import StableDiffusionXLPipeline
pipeline = StableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
torch_dtype=torch.float16,
).to("cuda")
pipeline.enable_xformers_memory_efficient_attention()
```
Call [`~ModelMixin.disable_xformers_memory_efficient_attention`] to disable it.
```py
pipeline.disable_xformers_memory_efficient_attention()
```
Diffusers supports multiple memory-efficient attention backends (FlashAttention, xFormers, SageAttention, and more) through [`~ModelMixin.set_attention_backend`]. Refer to the [Attention backends](./attention_backends) guide to learn how to switch between them.

View File

@@ -23,7 +23,7 @@ pip install xformers
> [!TIP]
> The xFormers `pip` package requires the latest version of PyTorch. If you need to use a previous version of PyTorch, then we recommend [installing xFormers from the source](https://github.com/facebookresearch/xformers#installing-xformers).
After xFormers is installed, you can use `enable_xformers_memory_efficient_attention()` for faster inference and reduced memory consumption as shown in this [section](memory#memory-efficient-attention).
After xFormers is installed, you can use it with [`~ModelMixin.set_attention_backend`] as shown in the [Attention backends](./attention_backends) guide.
> [!WARNING]
> According to this [issue](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212), xFormers `v0.0.16` cannot be used for training (fine-tune or DreamBooth) in some GPUs. If you observe this problem, please install a development version as indicated in the issue comments.

View File

@@ -14,6 +14,8 @@
sections:
- local: using-diffusers/schedulers
title: Load schedulers and models
- local: using-diffusers/guiders
title: Guiders
- title: Inference
isExpanded: false
@@ -80,8 +82,6 @@
title: ModularPipeline
- local: modular_diffusers/components_manager
title: ComponentsManager
- local: modular_diffusers/guiders
title: Guiders
- title: Training
isExpanded: false

View File

@@ -26,9 +26,17 @@ from diffusers.models._modeling_parallel import ContextParallelConfig
from ...testing_utils import (
is_context_parallel,
require_torch_multi_accelerator,
torch_device,
)
# Device configuration mapping
DEVICE_CONFIG = {
"cuda": {"backend": "nccl", "module": torch.cuda},
"xpu": {"backend": "xccl", "module": torch.xpu},
}
def _find_free_port():
"""Find a free port on localhost."""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
@@ -47,12 +55,17 @@ def _context_parallel_worker(rank, world_size, master_port, model_class, init_di
os.environ["RANK"] = str(rank)
os.environ["WORLD_SIZE"] = str(world_size)
# Get device configuration
device_config = DEVICE_CONFIG.get(torch_device, DEVICE_CONFIG["cuda"])
backend = device_config["backend"]
device_module = device_config["module"]
# Initialize process group
dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)
dist.init_process_group(backend=backend, rank=rank, world_size=world_size)
# Set device for this process
torch.cuda.set_device(rank)
device = torch.device(f"cuda:{rank}")
device_module.set_device(rank)
device = torch.device(f"{torch_device}:{rank}")
# Create model
model = model_class(**init_dict)
@@ -103,10 +116,16 @@ def _custom_mesh_worker(
os.environ["RANK"] = str(rank)
os.environ["WORLD_SIZE"] = str(world_size)
dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)
# Get device configuration
device_config = DEVICE_CONFIG.get(torch_device, DEVICE_CONFIG["cuda"])
backend = device_config["backend"]
device_module = device_config["module"]
torch.cuda.set_device(rank)
device = torch.device(f"cuda:{rank}")
dist.init_process_group(backend=backend, rank=rank, world_size=world_size)
# Set device for this process
device_module.set_device(rank)
device = torch.device(f"{torch_device}:{rank}")
model = model_class(**init_dict)
model.to(device)
@@ -116,7 +135,7 @@ def _custom_mesh_worker(
# DeviceMesh must be created after init_process_group, inside each worker process.
mesh = torch.distributed.device_mesh.init_device_mesh(
"cuda", mesh_shape=mesh_shape, mesh_dim_names=mesh_dim_names
torch_device, mesh_shape=mesh_shape, mesh_dim_names=mesh_dim_names
)
cp_config = ContextParallelConfig(**cp_dict, mesh=mesh)
model.enable_parallelism(config=cp_config)

View File

@@ -139,7 +139,9 @@ class HunyuanVideoImageToVideoPipelineFastTests(
num_hidden_layers=2,
image_size=224,
)
llava_text_encoder_config = LlavaConfig(vision_config, text_config, pad_token_id=100, image_token_index=101)
llava_text_encoder_config = LlavaConfig(
vision_config=vision_config, text_config=text_config, pad_token_id=100, image_token_index=101
)
clip_text_encoder_config = CLIPTextConfig(
bos_token_id=0,

210
utils/make_tiny_model.py Normal file
View File

@@ -0,0 +1,210 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "diffusers",
# "torch",
# "huggingface_hub",
# "accelerate",
# "transformers",
# "sentencepiece",
# "protobuf",
# ]
# ///
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility script to create tiny versions of diffusers models by reducing layer counts.
Can be run locally or submitted as an HF Job via `--launch`.
Usage:
# Run locally
python make_tiny_model.py --model_repo_id <model_repo_id> --output_repo_id <output_repo_id> [--subfolder transformer] [--num_layers 2]
# Push to Hub
python make_tiny_model.py --model_repo_id <model_repo_id> --output_repo_id <output_repo_id> --push_to_hub --token $HF_TOKEN
# Submit as an HF Job
python make_tiny_model.py --model_repo_id <model_repo_id> --output_repo_id <output_repo_id> --launch [--flavor cpu-basic]
"""
import argparse
import os
import re
LAYER_PARAM_PATTERN = re.compile(r"^(num_.*layers?|n_layers|n_refiner_layers)$")
DIM_PARAM_PATTERNS = {
re.compile(r"^num_attention_heads$"): 2,
re.compile(r"^num_.*attention_heads$"): 2,
re.compile(r"^num_key_value_heads$"): 2,
re.compile(r"^num_kv_heads$"): 1,
re.compile(r"^n_heads$"): 2,
re.compile(r"^n_kv_heads$"): 2,
re.compile(r"^attention_head_dim$"): 8,
re.compile(r"^.*attention_head_dim$"): 4,
re.compile(r"^cross_attention_dim.*$"): 8,
re.compile(r"^joint_attention_dim$"): 32,
re.compile(r"^pooled_projection_dim$"): 32,
re.compile(r"^caption_projection_dim$"): 32,
re.compile(r"^caption_channels$"): 8,
re.compile(r"^cap_feat_dim$"): 16,
re.compile(r"^hidden_size$"): 16,
re.compile(r"^dim$"): 16,
re.compile(r"^.*embed_dim$"): 16,
re.compile(r"^.*embed_.*dim$"): 16,
re.compile(r"^text_dim$"): 16,
re.compile(r"^time_embed_dim$"): 4,
re.compile(r"^ffn_dim$"): 32,
re.compile(r"^intermediate_size$"): 32,
re.compile(r"^sample_size$"): 32,
}
def parse_args():
parser = argparse.ArgumentParser(description="Create a tiny version of a diffusers model.")
parser.add_argument("--model_repo_id", type=str, required=True, help="HuggingFace repo ID of the source model.")
parser.add_argument(
"--output_repo_id",
type=str,
required=True,
help="HuggingFace repo ID or local path to save the tiny model to.",
)
parser.add_argument("--subfolder", type=str, default=None, help="Subfolder within the model repo.")
parser.add_argument("--num_layers", type=int, default=2, help="Number of layers to use for the tiny model.")
parser.add_argument(
"--shrink_dims",
action="store_true",
help="Also reduce dimension parameters (attention heads, hidden size, embedding dims, etc.).",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push the tiny model to the HuggingFace Hub.")
parser.add_argument(
"--token", type=str, default=None, help="HuggingFace token. Defaults to $HF_TOKEN env var if not provided."
)
launch_group = parser.add_argument_group("HF Jobs launch options")
launch_group.add_argument("--launch", action="store_true", help="Submit as an HF Job instead of running locally.")
launch_group.add_argument("--flavor", type=str, default="cpu-basic", help="HF Jobs hardware flavor.")
launch_group.add_argument("--timeout", type=str, default="30m", help="HF Jobs timeout.")
args = parser.parse_args()
if args.token is None:
args.token = os.environ.get("HF_TOKEN")
return args
def launch_job(args):
from huggingface_hub import run_uv_job
script_args = [
"--model_repo_id",
args.model_repo_id,
"--output_repo_id",
args.output_repo_id,
"--num_layers",
str(args.num_layers),
]
if args.subfolder:
script_args.extend(["--subfolder", args.subfolder])
if args.shrink_dims:
script_args.append("--shrink_dims")
if args.push_to_hub:
script_args.append("--push_to_hub")
job = run_uv_job(
__file__,
script_args=script_args,
flavor=args.flavor,
timeout=args.timeout,
secrets={"HF_TOKEN": args.token} if args.token else {},
)
print(f"Job submitted: {job.url}")
print(f"Job ID: {job.id}")
return job
def make_tiny_model(
model_repo_id, output_repo_id, subfolder=None, num_layers=2, shrink_dims=False, push_to_hub=False, token=None
):
from diffusers import AutoModel
config_kwargs = {}
if token:
config_kwargs["token"] = token
config = AutoModel.load_config(model_repo_id, subfolder=subfolder, **config_kwargs)
modified_keys = {}
for key, value in config.items():
if LAYER_PARAM_PATTERN.match(key) and isinstance(value, int) and value > num_layers:
modified_keys[key] = (value, num_layers)
config[key] = num_layers
if shrink_dims:
for key, value in config.items():
if not isinstance(value, int) or key.startswith("_"):
continue
for pattern, tiny_value in DIM_PARAM_PATTERNS.items():
if pattern.match(key) and value > tiny_value:
modified_keys[key] = (value, tiny_value)
config[key] = tiny_value
break
if not modified_keys:
print("WARNING: No config parameters were modified.")
print(f"Config keys: {[k for k in config if not k.startswith('_')]}")
return
print("Modified config parameters:")
for key, (old, new) in modified_keys.items():
print(f" {key}: {old} -> {new}")
model = AutoModel.from_config(config)
total_params = sum(p.numel() for p in model.parameters())
print(f"Tiny model created with {total_params:,} parameters.")
save_kwargs = {}
if token:
save_kwargs["token"] = token
if push_to_hub:
save_kwargs["repo_id"] = output_repo_id
model.save_pretrained(output_repo_id, push_to_hub=push_to_hub, **save_kwargs)
if push_to_hub:
print(f"Model pushed to https://huggingface.co/{output_repo_id}")
else:
print(f"Model saved to {output_repo_id}")
def main():
args = parse_args()
if args.launch:
launch_job(args)
else:
make_tiny_model(
model_repo_id=args.model_repo_id,
output_repo_id=args.output_repo_id,
subfolder=args.subfolder,
num_layers=args.num_layers,
shrink_dims=args.shrink_dims,
push_to_hub=args.push_to_hub,
token=args.token,
)
if __name__ == "__main__":
main()