# Mirror of https://github.com/huggingface/diffusers.git
# Synced 2025-12-06 12:34:13 +08:00
# File: docs/source/en/_toctree.yml (739 lines, 24 KiB, YAML)
---
# Hugging Face doc-builder navigation tree for the Diffusers documentation.
# Each entry maps a doc page (`local`: path without extension) to its sidebar
# `title`; nested `sections` keys build the chapter hierarchy.
- title: Get started
  sections:
  - local: index
    title: Diffusers
  - local: installation
    title: Installation
  - local: quicktour
    title: Quickstart
  - local: stable_diffusion
    title: Basic performance
# Guides on loading and running complete diffusion pipelines.
- title: Pipelines
  isExpanded: false
  sections:
  - local: using-diffusers/loading
    title: DiffusionPipeline
  - local: tutorials/autopipeline
    title: AutoPipeline
  - local: using-diffusers/custom_pipeline_overview
    title: Community pipelines and components
  - local: using-diffusers/callback
    title: Pipeline callbacks
  - local: using-diffusers/reusing_seeds
    title: Reproducibility
  - local: using-diffusers/schedulers
    title: Schedulers
  - local: using-diffusers/other-formats
    title: Model formats
  - local: using-diffusers/push_to_hub
    title: Sharing pipelines and models
# Adapter techniques that customize a base model at inference time.
- title: Adapters
  isExpanded: false
  sections:
  - local: tutorials/using_peft_for_inference
    title: LoRA
  - local: using-diffusers/ip_adapter
    title: IP-Adapter
  - local: using-diffusers/controlnet
    title: ControlNet
  - local: using-diffusers/t2i_adapter
    title: T2I-Adapter
  - local: using-diffusers/dreambooth
    title: DreamBooth
  - local: using-diffusers/textual_inversion_inference
    title: Textual inversion
# General inference workflows (prompting, serving, batching, multi-device).
- title: Inference
  isExpanded: false
  sections:
  - local: using-diffusers/weighted_prompts
    title: Prompting
  - local: using-diffusers/create_a_server
    title: Create a server
  - local: using-diffusers/batched_inference
    title: Batch inference
  - local: training/distributed_inference
    title: Distributed inference
# Speed/memory optimizations; the nested subsection collects third-party
# community-maintained optimization integrations.
- title: Inference optimization
  isExpanded: false
  sections:
  - local: optimization/fp16
    title: Accelerate inference
  - local: optimization/cache
    title: Caching
  - local: optimization/attention_backends
    title: Attention backends
  - local: optimization/memory
    title: Reduce memory usage
  - local: optimization/speed-memory-optims
    title: Compiling and offloading quantized models
  - title: Community optimizations
    sections:
    - local: optimization/pruna
      title: Pruna
    - local: optimization/xformers
      title: xFormers
    - local: optimization/tome
      title: Token merging
    - local: optimization/deepcache
      title: DeepCache
    - local: optimization/cache_dit
      title: CacheDiT
    - local: optimization/tgate
      title: TGATE
    - local: optimization/xdit
      title: xDiT
    - local: optimization/para_attn
      title: ParaAttention
    - local: using-diffusers/image_quality
      title: FreeU
# Remote/local hybrid inference (offloading VAE stages to a service).
- title: Hybrid Inference
  isExpanded: false
  sections:
  - local: hybrid_inference/overview
    title: Overview
  - local: hybrid_inference/vae_decode
    title: VAE Decode
  - local: hybrid_inference/vae_encode
    title: VAE Encode
  - local: hybrid_inference/api_reference
    title: API Reference
# The modular (block-based) pipeline construction system.
- title: Modular Diffusers
  isExpanded: false
  sections:
  - local: modular_diffusers/overview
    title: Overview
  - local: modular_diffusers/quickstart
    title: Quickstart
  - local: modular_diffusers/modular_diffusers_states
    title: States
  - local: modular_diffusers/pipeline_block
    title: ModularPipelineBlocks
  - local: modular_diffusers/sequential_pipeline_blocks
    title: SequentialPipelineBlocks
  - local: modular_diffusers/loop_sequential_pipeline_blocks
    title: LoopSequentialPipelineBlocks
  - local: modular_diffusers/auto_pipeline_blocks
    title: AutoPipelineBlocks
  - local: modular_diffusers/modular_pipeline
    title: ModularPipeline
  - local: modular_diffusers/components_manager
    title: ComponentsManager
  - local: modular_diffusers/guiders
    title: Guiders
# Training guides, split into per-model scripts and training methods.
- title: Training
  isExpanded: false
  sections:
  - local: training/overview
    title: Overview
  - local: training/create_dataset
    title: Create a dataset for training
  - local: training/adapt_a_model
    title: Adapt a model to a new task
  - local: tutorials/basic_training
    title: Train a diffusion model
  - title: Models
    sections:
    - local: training/unconditional_training
      title: Unconditional image generation
    - local: training/text2image
      title: Text-to-image
    - local: training/sdxl
      title: Stable Diffusion XL
    - local: training/kandinsky
      title: Kandinsky 2.2
    - local: training/wuerstchen
      title: Wuerstchen
    - local: training/controlnet
      title: ControlNet
    - local: training/t2i_adapters
      title: T2I-Adapters
    - local: training/instructpix2pix
      title: InstructPix2Pix
    - local: training/cogvideox
      title: CogVideoX
  - title: Methods
    sections:
    - local: training/text_inversion
      title: Textual Inversion
    - local: training/dreambooth
      title: DreamBooth
    - local: training/lora
      title: LoRA
    - local: training/custom_diffusion
      title: Custom Diffusion
    - local: training/lcm_distill
      title: Latent Consistency Distillation
    - local: training/ddpo
      title: Reinforcement learning training with DDPO
# Supported quantization backends.
- title: Quantization
  isExpanded: false
  sections:
  - local: quantization/overview
    title: Getting started
  - local: quantization/bitsandbytes
    title: bitsandbytes
  - local: quantization/gguf
    title: gguf
  - local: quantization/torchao
    title: torchao
  - local: quantization/quanto
    title: quanto
  - local: quantization/modelopt
    title: NVIDIA ModelOpt
# Hardware- and runtime-specific deployment backends.
- title: Model accelerators and hardware
  isExpanded: false
  sections:
  - local: optimization/onnx
    title: ONNX
  - local: optimization/open_vino
    title: OpenVINO
  - local: optimization/coreml
    title: Core ML
  - local: optimization/mps
    title: Metal Performance Shaders (MPS)
  - local: optimization/habana
    title: Intel Gaudi
  - local: optimization/neuron
    title: AWS Neuron
# Worked examples for particular model families.
- title: Specific pipeline examples
  isExpanded: false
  sections:
  - local: using-diffusers/consisid
    title: ConsisID
  - local: using-diffusers/sdxl
    title: Stable Diffusion XL
  - local: using-diffusers/sdxl_turbo
    title: SDXL Turbo
  - local: using-diffusers/kandinsky
    title: Kandinsky
  - local: using-diffusers/omnigen
    title: OmniGen
  - local: using-diffusers/pag
    title: PAG
  - local: using-diffusers/inference_with_lcm
    title: Latent Consistency Model
  - local: using-diffusers/shap-e
    title: Shap-E
  - local: using-diffusers/diffedit
    title: DiffEdit
  - local: using-diffusers/inference_with_tcd_lora
    title: Trajectory Consistency Distillation-LoRA
  - local: using-diffusers/svd
    title: Stable Video Diffusion
  - local: using-diffusers/marigold_usage
    title: Marigold Computer Vision
# Conceptual docs and task-oriented recipes.
# NOTE(review): entries after Depth-to-image are placed back at the Resources
# level (not inside Task recipes), matching the upstream toctree layout.
- title: Resources
  isExpanded: false
  sections:
  - title: Task recipes
    sections:
    - local: using-diffusers/unconditional_image_generation
      title: Unconditional image generation
    - local: using-diffusers/conditional_image_generation
      title: Text-to-image
    - local: using-diffusers/img2img
      title: Image-to-image
    - local: using-diffusers/inpaint
      title: Inpainting
    - local: advanced_inference/outpaint
      title: Outpainting
    - local: using-diffusers/text-img2vid
      title: Video generation
    - local: using-diffusers/depth2img
      title: Depth-to-image
  - local: using-diffusers/write_own_pipeline
    title: Understanding pipelines, models and schedulers
  - local: community_projects
    title: Projects built with Diffusers
  - local: conceptual/philosophy
    title: Philosophy
  - local: using-diffusers/controlling_generation
    title: Controlled generation
  - local: conceptual/contribution
    title: How to contribute?
  - local: conceptual/ethical_guidelines
    title: Diffusers' Ethical Guidelines
  - local: conceptual/evaluation
    title: Evaluating Diffusion Models
# API reference chapter; subsections follow below at the same nesting level.
- title: API
  isExpanded: false
  sections:
  - title: Main Classes
    sections:
    - local: api/configuration
      title: Configuration
    - local: api/logging
      title: Logging
    - local: api/outputs
      title: Outputs
    - local: api/quantization
      title: Quantization
    - local: api/parallel
      title: Parallel inference
- title: Modular
|
||
sections:
|
||
- local: api/modular_diffusers/pipeline
|
||
title: Pipeline
|
||
- local: api/modular_diffusers/pipeline_blocks
|
||
title: Blocks
|
||
- local: api/modular_diffusers/pipeline_states
|
||
title: States
|
||
- local: api/modular_diffusers/pipeline_components
|
||
title: Components and configs
|
||
- local: api/modular_diffusers/guiders
|
||
title: Guiders
|
||
- title: Loaders
|
||
sections:
|
||
- local: api/loaders/ip_adapter
|
||
title: IP-Adapter
|
||
- local: api/loaders/lora
|
||
title: LoRA
|
||
- local: api/loaders/single_file
|
||
title: Single files
|
||
- local: api/loaders/textual_inversion
|
||
title: Textual Inversion
|
||
- local: api/loaders/unet
|
||
title: UNet
|
||
- local: api/loaders/transformer_sd3
|
||
title: SD3Transformer2D
|
||
- local: api/loaders/peft
|
||
title: PEFT
|
||
- title: Models
|
||
sections:
|
||
- local: api/models/overview
|
||
title: Overview
|
||
- local: api/models/auto_model
|
||
title: AutoModel
|
||
- title: ControlNets
|
||
sections:
|
||
- local: api/models/controlnet
|
||
title: ControlNetModel
|
||
- local: api/models/controlnet_union
|
||
title: ControlNetUnionModel
|
||
- local: api/models/controlnet_flux
|
||
title: FluxControlNetModel
|
||
- local: api/models/controlnet_hunyuandit
|
||
title: HunyuanDiT2DControlNetModel
|
||
- local: api/models/controlnet_sana
|
||
title: SanaControlNetModel
|
||
- local: api/models/controlnet_sd3
|
||
title: SD3ControlNetModel
|
||
- local: api/models/controlnet_sparsectrl
|
||
title: SparseControlNetModel
|
||
- title: Transformers
|
||
sections:
|
||
- local: api/models/allegro_transformer3d
|
||
title: AllegroTransformer3DModel
|
||
- local: api/models/aura_flow_transformer2d
|
||
title: AuraFlowTransformer2DModel
|
||
- local: api/models/bria_transformer
|
||
title: BriaTransformer2DModel
|
||
- local: api/models/chroma_transformer
|
||
title: ChromaTransformer2DModel
|
||
- local: api/models/cogvideox_transformer3d
|
||
title: CogVideoXTransformer3DModel
|
||
- local: api/models/cogview3plus_transformer2d
|
||
title: CogView3PlusTransformer2DModel
|
||
- local: api/models/cogview4_transformer2d
|
||
title: CogView4Transformer2DModel
|
||
- local: api/models/consisid_transformer3d
|
||
title: ConsisIDTransformer3DModel
|
||
- local: api/models/cosmos_transformer3d
|
||
title: CosmosTransformer3DModel
|
||
- local: api/models/dit_transformer2d
|
||
title: DiTTransformer2DModel
|
||
- local: api/models/easyanimate_transformer3d
|
||
title: EasyAnimateTransformer3DModel
|
||
- local: api/models/flux_transformer
|
||
title: FluxTransformer2DModel
|
||
- local: api/models/hidream_image_transformer
|
||
title: HiDreamImageTransformer2DModel
|
||
- local: api/models/hunyuan_transformer2d
|
||
title: HunyuanDiT2DModel
|
||
- local: api/models/hunyuan_video_transformer_3d
|
||
title: HunyuanVideoTransformer3DModel
|
||
- local: api/models/latte_transformer3d
|
||
title: LatteTransformer3DModel
|
||
- local: api/models/ltx_video_transformer3d
|
||
title: LTXVideoTransformer3DModel
|
||
- local: api/models/lumina2_transformer2d
|
||
title: Lumina2Transformer2DModel
|
||
- local: api/models/lumina_nextdit2d
|
||
title: LuminaNextDiT2DModel
|
||
- local: api/models/mochi_transformer3d
|
||
title: MochiTransformer3DModel
|
||
- local: api/models/omnigen_transformer
|
||
title: OmniGenTransformer2DModel
|
||
- local: api/models/pixart_transformer2d
|
||
title: PixArtTransformer2DModel
|
||
- local: api/models/prior_transformer
|
||
title: PriorTransformer
|
||
- local: api/models/qwenimage_transformer2d
|
||
title: QwenImageTransformer2DModel
|
||
- local: api/models/sana_transformer2d
|
||
title: SanaTransformer2DModel
|
||
- local: api/models/sd3_transformer2d
|
||
title: SD3Transformer2DModel
|
||
- local: api/models/skyreels_v2_transformer_3d
|
||
title: SkyReelsV2Transformer3DModel
|
||
- local: api/models/stable_audio_transformer
|
||
title: StableAudioDiTModel
|
||
- local: api/models/transformer2d
|
||
title: Transformer2DModel
|
||
- local: api/models/transformer_temporal
|
||
title: TransformerTemporalModel
|
||
- local: api/models/wan_transformer_3d
|
||
title: WanTransformer3DModel
|
||
- title: UNets
|
||
sections:
|
||
- local: api/models/stable_cascade_unet
|
||
title: StableCascadeUNet
|
||
- local: api/models/unet
|
||
title: UNet1DModel
|
||
- local: api/models/unet2d-cond
|
||
title: UNet2DConditionModel
|
||
- local: api/models/unet2d
|
||
title: UNet2DModel
|
||
- local: api/models/unet3d-cond
|
||
title: UNet3DConditionModel
|
||
- local: api/models/unet-motion
|
||
title: UNetMotionModel
|
||
- local: api/models/uvit2d
|
||
title: UViT2DModel
|
||
- title: VAEs
|
||
sections:
|
||
- local: api/models/asymmetricautoencoderkl
|
||
title: AsymmetricAutoencoderKL
|
||
- local: api/models/autoencoder_dc
|
||
title: AutoencoderDC
|
||
- local: api/models/autoencoderkl
|
||
title: AutoencoderKL
|
||
- local: api/models/autoencoderkl_allegro
|
||
title: AutoencoderKLAllegro
|
||
- local: api/models/autoencoderkl_cogvideox
|
||
title: AutoencoderKLCogVideoX
|
||
- local: api/models/autoencoderkl_cosmos
|
||
title: AutoencoderKLCosmos
|
||
- local: api/models/autoencoder_kl_hunyuan_video
|
||
title: AutoencoderKLHunyuanVideo
|
||
- local: api/models/autoencoderkl_ltx_video
|
||
title: AutoencoderKLLTXVideo
|
||
- local: api/models/autoencoderkl_magvit
|
||
title: AutoencoderKLMagvit
|
||
- local: api/models/autoencoderkl_mochi
|
||
title: AutoencoderKLMochi
|
||
- local: api/models/autoencoderkl_qwenimage
|
||
title: AutoencoderKLQwenImage
|
||
- local: api/models/autoencoder_kl_wan
|
||
title: AutoencoderKLWan
|
||
- local: api/models/consistency_decoder_vae
|
||
title: ConsistencyDecoderVAE
|
||
- local: api/models/autoencoder_oobleck
|
||
title: Oobleck AutoEncoder
|
||
- local: api/models/autoencoder_tiny
|
||
title: Tiny AutoEncoder
|
||
- local: api/models/vq
|
||
title: VQModel
|
||
- title: Pipelines
|
||
sections:
|
||
- local: api/pipelines/overview
|
||
title: Overview
|
||
- local: api/pipelines/allegro
|
||
title: Allegro
|
||
- local: api/pipelines/amused
|
||
title: aMUSEd
|
||
- local: api/pipelines/animatediff
|
||
title: AnimateDiff
|
||
- local: api/pipelines/attend_and_excite
|
||
title: Attend-and-Excite
|
||
- local: api/pipelines/audioldm
|
||
title: AudioLDM
|
||
- local: api/pipelines/audioldm2
|
||
title: AudioLDM 2
|
||
- local: api/pipelines/aura_flow
|
||
title: AuraFlow
|
||
- local: api/pipelines/auto_pipeline
|
||
title: AutoPipeline
|
||
- local: api/pipelines/blip_diffusion
|
||
title: BLIP-Diffusion
|
||
- local: api/pipelines/bria_3_2
|
||
title: Bria 3.2
|
||
- local: api/pipelines/chroma
|
||
title: Chroma
|
||
- local: api/pipelines/cogvideox
|
||
title: CogVideoX
|
||
- local: api/pipelines/cogview3
|
||
title: CogView3
|
||
- local: api/pipelines/cogview4
|
||
title: CogView4
|
||
- local: api/pipelines/consisid
|
||
title: ConsisID
|
||
- local: api/pipelines/consistency_models
|
||
title: Consistency Models
|
||
- local: api/pipelines/controlnet
|
||
title: ControlNet
|
||
- local: api/pipelines/controlnet_flux
|
||
title: ControlNet with Flux.1
|
||
- local: api/pipelines/controlnet_hunyuandit
|
||
title: ControlNet with Hunyuan-DiT
|
||
- local: api/pipelines/controlnet_sd3
|
||
title: ControlNet with Stable Diffusion 3
|
||
- local: api/pipelines/controlnet_sdxl
|
||
title: ControlNet with Stable Diffusion XL
|
||
- local: api/pipelines/controlnet_sana
|
||
title: ControlNet-Sana
|
||
- local: api/pipelines/controlnetxs
|
||
title: ControlNet-XS
|
||
- local: api/pipelines/controlnetxs_sdxl
|
||
title: ControlNet-XS with Stable Diffusion XL
|
||
- local: api/pipelines/controlnet_union
|
||
title: ControlNetUnion
|
||
- local: api/pipelines/cosmos
|
||
title: Cosmos
|
||
- local: api/pipelines/dance_diffusion
|
||
title: Dance Diffusion
|
||
- local: api/pipelines/ddim
|
||
title: DDIM
|
||
- local: api/pipelines/ddpm
|
||
title: DDPM
|
||
- local: api/pipelines/deepfloyd_if
|
||
title: DeepFloyd IF
|
||
- local: api/pipelines/diffedit
|
||
title: DiffEdit
|
||
- local: api/pipelines/dit
|
||
title: DiT
|
||
- local: api/pipelines/easyanimate
|
||
title: EasyAnimate
|
||
- local: api/pipelines/flux
|
||
title: Flux
|
||
- local: api/pipelines/control_flux_inpaint
|
||
title: FluxControlInpaint
|
||
- local: api/pipelines/framepack
|
||
title: Framepack
|
||
- local: api/pipelines/hidream
|
||
title: HiDream-I1
|
||
- local: api/pipelines/hunyuandit
|
||
title: Hunyuan-DiT
|
||
- local: api/pipelines/hunyuan_video
|
||
title: HunyuanVideo
|
||
- local: api/pipelines/i2vgenxl
|
||
title: I2VGen-XL
|
||
- local: api/pipelines/pix2pix
|
||
title: InstructPix2Pix
|
||
- local: api/pipelines/kandinsky
|
||
title: Kandinsky 2.1
|
||
- local: api/pipelines/kandinsky_v22
|
||
title: Kandinsky 2.2
|
||
- local: api/pipelines/kandinsky3
|
||
title: Kandinsky 3
|
||
- local: api/pipelines/kolors
|
||
title: Kolors
|
||
- local: api/pipelines/latent_consistency_models
|
||
title: Latent Consistency Models
|
||
- local: api/pipelines/latent_diffusion
|
||
title: Latent Diffusion
|
||
- local: api/pipelines/latte
|
||
title: Latte
|
||
- local: api/pipelines/ledits_pp
|
||
title: LEDITS++
|
||
- local: api/pipelines/ltx_video
|
||
title: LTXVideo
|
||
- local: api/pipelines/lumina2
|
||
title: Lumina 2.0
|
||
- local: api/pipelines/lumina
|
||
title: Lumina-T2X
|
||
- local: api/pipelines/marigold
|
||
title: Marigold
|
||
- local: api/pipelines/mochi
|
||
title: Mochi
|
||
- local: api/pipelines/panorama
|
||
title: MultiDiffusion
|
||
- local: api/pipelines/musicldm
|
||
title: MusicLDM
|
||
- local: api/pipelines/omnigen
|
||
title: OmniGen
|
||
- local: api/pipelines/pag
|
||
title: PAG
|
||
- local: api/pipelines/paint_by_example
|
||
title: Paint by Example
|
||
- local: api/pipelines/pia
|
||
title: Personalized Image Animator (PIA)
|
||
- local: api/pipelines/pixart
|
||
title: PixArt-α
|
||
- local: api/pipelines/pixart_sigma
|
||
title: PixArt-Σ
|
||
- local: api/pipelines/qwenimage
|
||
title: QwenImage
|
||
- local: api/pipelines/sana
|
||
title: Sana
|
||
- local: api/pipelines/sana_sprint
|
||
title: Sana Sprint
|
||
- local: api/pipelines/self_attention_guidance
|
||
title: Self-Attention Guidance
|
||
- local: api/pipelines/semantic_stable_diffusion
|
||
title: Semantic Guidance
|
||
- local: api/pipelines/shap_e
|
||
title: Shap-E
|
||
- local: api/pipelines/skyreels_v2
|
||
title: SkyReels-V2
|
||
- local: api/pipelines/stable_audio
|
||
title: Stable Audio
|
||
- local: api/pipelines/stable_cascade
|
||
title: Stable Cascade
|
||
- title: Stable Diffusion
|
||
sections:
|
||
- local: api/pipelines/stable_diffusion/overview
|
||
title: Overview
|
||
- local: api/pipelines/stable_diffusion/depth2img
|
||
title: Depth-to-image
|
||
- local: api/pipelines/stable_diffusion/gligen
|
||
title: GLIGEN (Grounded Language-to-Image Generation)
|
||
- local: api/pipelines/stable_diffusion/image_variation
|
||
title: Image variation
|
||
- local: api/pipelines/stable_diffusion/img2img
|
||
title: Image-to-image
|
||
- local: api/pipelines/stable_diffusion/svd
|
||
title: Image-to-video
|
||
- local: api/pipelines/stable_diffusion/inpaint
|
||
title: Inpainting
|
||
- local: api/pipelines/stable_diffusion/k_diffusion
|
||
title: K-Diffusion
|
||
- local: api/pipelines/stable_diffusion/latent_upscale
|
||
title: Latent upscaler
|
||
- local: api/pipelines/stable_diffusion/ldm3d_diffusion
|
||
title: LDM3D Text-to-(RGB, Depth), Text-to-(RGB-pano, Depth-pano), LDM3D Upscaler
|
||
- local: api/pipelines/stable_diffusion/stable_diffusion_safe
|
||
title: Safe Stable Diffusion
|
||
- local: api/pipelines/stable_diffusion/sdxl_turbo
|
||
title: SDXL Turbo
|
||
- local: api/pipelines/stable_diffusion/stable_diffusion_2
|
||
title: Stable Diffusion 2
|
||
- local: api/pipelines/stable_diffusion/stable_diffusion_3
|
||
title: Stable Diffusion 3
|
||
- local: api/pipelines/stable_diffusion/stable_diffusion_xl
|
||
title: Stable Diffusion XL
|
||
- local: api/pipelines/stable_diffusion/upscale
|
||
title: Super-resolution
|
||
- local: api/pipelines/stable_diffusion/adapter
|
||
title: T2I-Adapter
|
||
- local: api/pipelines/stable_diffusion/text2img
|
||
title: Text-to-image
|
||
- local: api/pipelines/stable_unclip
|
||
title: Stable unCLIP
|
||
- local: api/pipelines/text_to_video
|
||
title: Text-to-video
|
||
- local: api/pipelines/text_to_video_zero
|
||
title: Text2Video-Zero
|
||
- local: api/pipelines/unclip
|
||
title: unCLIP
|
||
- local: api/pipelines/unidiffuser
|
||
title: UniDiffuser
|
||
- local: api/pipelines/value_guided_sampling
|
||
title: Value-guided sampling
|
||
- local: api/pipelines/visualcloze
|
||
title: VisualCloze
|
||
- local: api/pipelines/wan
|
||
title: Wan
|
||
- local: api/pipelines/wuerstchen
|
||
title: Wuerstchen
|
||
- title: Schedulers
|
||
sections:
|
||
- local: api/schedulers/overview
|
||
title: Overview
|
||
- local: api/schedulers/cm_stochastic_iterative
|
||
title: CMStochasticIterativeScheduler
|
||
- local: api/schedulers/ddim_cogvideox
|
||
title: CogVideoXDDIMScheduler
|
||
- local: api/schedulers/multistep_dpm_solver_cogvideox
|
||
title: CogVideoXDPMScheduler
|
||
- local: api/schedulers/consistency_decoder
|
||
title: ConsistencyDecoderScheduler
|
||
- local: api/schedulers/cosine_dpm
|
||
title: CosineDPMSolverMultistepScheduler
|
||
- local: api/schedulers/ddim_inverse
|
||
title: DDIMInverseScheduler
|
||
- local: api/schedulers/ddim
|
||
title: DDIMScheduler
|
||
- local: api/schedulers/ddpm
|
||
title: DDPMScheduler
|
||
- local: api/schedulers/deis
|
||
title: DEISMultistepScheduler
|
||
- local: api/schedulers/multistep_dpm_solver_inverse
|
||
title: DPMSolverMultistepInverse
|
||
- local: api/schedulers/multistep_dpm_solver
|
||
title: DPMSolverMultistepScheduler
|
||
- local: api/schedulers/dpm_sde
|
||
title: DPMSolverSDEScheduler
|
||
- local: api/schedulers/singlestep_dpm_solver
|
||
title: DPMSolverSinglestepScheduler
|
||
- local: api/schedulers/edm_multistep_dpm_solver
|
||
title: EDMDPMSolverMultistepScheduler
|
||
- local: api/schedulers/edm_euler
|
||
title: EDMEulerScheduler
|
||
- local: api/schedulers/euler_ancestral
|
||
title: EulerAncestralDiscreteScheduler
|
||
- local: api/schedulers/euler
|
||
title: EulerDiscreteScheduler
|
||
- local: api/schedulers/flow_match_euler_discrete
|
||
title: FlowMatchEulerDiscreteScheduler
|
||
- local: api/schedulers/flow_match_heun_discrete
|
||
title: FlowMatchHeunDiscreteScheduler
|
||
- local: api/schedulers/heun
|
||
title: HeunDiscreteScheduler
|
||
- local: api/schedulers/ipndm
|
||
title: IPNDMScheduler
|
||
- local: api/schedulers/stochastic_karras_ve
|
||
title: KarrasVeScheduler
|
||
- local: api/schedulers/dpm_discrete_ancestral
|
||
title: KDPM2AncestralDiscreteScheduler
|
||
- local: api/schedulers/dpm_discrete
|
||
title: KDPM2DiscreteScheduler
|
||
- local: api/schedulers/lcm
|
||
title: LCMScheduler
|
||
- local: api/schedulers/lms_discrete
|
||
title: LMSDiscreteScheduler
|
||
- local: api/schedulers/pndm
|
||
title: PNDMScheduler
|
||
- local: api/schedulers/repaint
|
||
title: RePaintScheduler
|
||
- local: api/schedulers/score_sde_ve
|
||
title: ScoreSdeVeScheduler
|
||
- local: api/schedulers/score_sde_vp
|
||
title: ScoreSdeVpScheduler
|
||
- local: api/schedulers/tcd
|
||
title: TCDScheduler
|
||
- local: api/schedulers/unipc
|
||
title: UniPCMultistepScheduler
|
||
- local: api/schedulers/vq_diffusion
|
||
title: VQDiffusionScheduler
|
||
- title: Internal classes
|
||
sections:
|
||
- local: api/internal_classes_overview
|
||
title: Overview
|
||
- local: api/attnprocessor
|
||
title: Attention Processor
|
||
- local: api/activations
|
||
title: Custom activation functions
|
||
- local: api/cache
|
||
title: Caching methods
|
||
- local: api/normalization
|
||
title: Custom normalization layers
|
||
- local: api/utilities
|
||
title: Utilities
|
||
- local: api/image_processor
|
||
title: VAE Image Processor
|
||
- local: api/video_processor
|
||
title: Video Processor
|