Mirror of https://github.com/huggingface/diffusers.git (synced 2026-01-17 00:55:42 +08:00)

Compare commits: fix-motion...unet-confi (15 commits)

- 327243fd95
- d2fc5ebb95
- 779eef95b4
- d5b8d1ca04
- eba7e7a6d7
- 31de879fb4
- 07349c25fe
- 8974c50bff
- c18058b405
- 2938d5a672
- d4ade821cd
- 3a7e481611
- d649d6c6f3
- 777063e1bf
- 104afbce84
@@ -52,6 +52,8 @@
      title: Image-to-image
    - local: using-diffusers/inpaint
      title: Inpainting
    - local: using-diffusers/text-img2vid
      title: Text or image-to-video
    - local: using-diffusers/depth2img
      title: Depth-to-image
    title: Tasks
@@ -323,6 +325,8 @@
      title: Text-to-image
    - local: api/pipelines/stable_diffusion/img2img
      title: Image-to-image
    - local: api/pipelines/stable_diffusion/svd
      title: Image-to-video
    - local: api/pipelines/stable_diffusion/inpaint
      title: Inpainting
    - local: api/pipelines/stable_diffusion/depth2img
@@ -20,6 +20,24 @@ An attention processor is a class for applying different types of attention mechanisms.

## AttnProcessor2_0

[[autodoc]] models.attention_processor.AttnProcessor2_0

## AttnAddedKVProcessor

[[autodoc]] models.attention_processor.AttnAddedKVProcessor

## AttnAddedKVProcessor2_0

[[autodoc]] models.attention_processor.AttnAddedKVProcessor2_0

## CrossFrameAttnProcessor

[[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor

## CustomDiffusionAttnProcessor

[[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor

## CustomDiffusionAttnProcessor2_0

[[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor2_0

## CustomDiffusionXFormersAttnProcessor

[[autodoc]] models.attention_processor.CustomDiffusionXFormersAttnProcessor

## FusedAttnProcessor2_0

[[autodoc]] models.attention_processor.FusedAttnProcessor2_0

@@ -29,32 +47,17 @@ An attention processor is a class for applying different types of attention mechanisms.

## LoRAAttnProcessor2_0

[[autodoc]] models.attention_processor.LoRAAttnProcessor2_0

## CustomDiffusionAttnProcessor

[[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor

## CustomDiffusionAttnProcessor2_0

[[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor2_0

## AttnAddedKVProcessor

[[autodoc]] models.attention_processor.AttnAddedKVProcessor

## AttnAddedKVProcessor2_0

[[autodoc]] models.attention_processor.AttnAddedKVProcessor2_0

## LoRAAttnAddedKVProcessor

[[autodoc]] models.attention_processor.LoRAAttnAddedKVProcessor

## XFormersAttnProcessor

[[autodoc]] models.attention_processor.XFormersAttnProcessor

## LoRAXFormersAttnProcessor

[[autodoc]] models.attention_processor.LoRAXFormersAttnProcessor

## CustomDiffusionXFormersAttnProcessor

[[autodoc]] models.attention_processor.CustomDiffusionXFormersAttnProcessor

## SlicedAttnProcessor

[[autodoc]] models.attention_processor.SlicedAttnProcessor

## SlicedAttnAddedKVProcessor

[[autodoc]] models.attention_processor.SlicedAttnAddedKVProcessor

## XFormersAttnProcessor

[[autodoc]] models.attention_processor.XFormersAttnProcessor
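Since this page only lists the processor classes, here is a minimal usage sketch of how a processor is typically swapped in (the checkpoint is just an example; `set_attn_processor` is the hook exposed by the UNet):

```py
import torch
from diffusers import StableDiffusionPipeline
from diffusers.models.attention_processor import AttnProcessor2_0

pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Replace every attention processor in the UNet with the PyTorch 2.0 scaled dot-product version
pipeline.unet.set_attn_processor(AttnProcessor2_0())
```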
docs/source/en/api/pipelines/stable_diffusion/svd.md (new file, 43 lines)
@@ -0,0 +1,43 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Stable Video Diffusion

Stable Video Diffusion was proposed in [Stable Video Diffusion: Scaling Latent Video Diffusion Models to Large Datasets](https://hf.co/papers/2311.15127) by Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, Varun Jampani, Robin Rombach.

The abstract from the paper is:

*We present Stable Video Diffusion - a latent video diffusion model for high-resolution, state-of-the-art text-to-video and image-to-video generation. Recently, latent diffusion models trained for 2D image synthesis have been turned into generative video models by inserting temporal layers and finetuning them on small, high-quality video datasets. However, training methods in the literature vary widely, and the field has yet to agree on a unified strategy for curating video data. In this paper, we identify and evaluate three different stages for successful training of video LDMs: text-to-image pretraining, video pretraining, and high-quality video finetuning. Furthermore, we demonstrate the necessity of a well-curated pretraining dataset for generating high-quality videos and present a systematic curation process to train a strong base model, including captioning and filtering strategies. We then explore the impact of finetuning our base model on high-quality data and train a text-to-video model that is competitive with closed-source video generation. We also show that our base model provides a powerful motion representation for downstream tasks such as image-to-video generation and adaptability to camera motion-specific LoRA modules. Finally, we demonstrate that our model provides a strong multi-view 3D-prior and can serve as a base to finetune a multi-view diffusion model that jointly generates multiple views of objects in a feedforward fashion, outperforming image-based methods at a fraction of their compute budget. We release code and model weights at this https URL.*

<Tip>

To learn how to use Stable Video Diffusion, take a look at the [Stable Video Diffusion](../../../using-diffusers/svd) guide.

<br>

Check out the [Stability AI](https://huggingface.co/stabilityai) Hub organization for the [base](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid) and [extended frame](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt) checkpoints!

</Tip>

## Tips

Video generation is memory-intensive, and one way to reduce your memory usage is to set `enable_forward_chunking` on the pipeline's UNet so you don't run the entire feedforward layer at once. Breaking it up into chunks in a loop is more memory-efficient.

Check out the [Text or image-to-video](text-img2vid) guide for more details about how certain parameters can affect video generation and how to optimize inference by reducing memory usage.
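For reference, a minimal sketch of what that looks like with [`StableVideoDiffusionPipeline`] (the `decode_chunk_size` value is just an example):

```py
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipeline.enable_model_cpu_offload()
# Run the UNet's feedforward layers in chunks instead of all at once
pipeline.unet.enable_forward_chunking()

image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png")
image = image.resize((1024, 576))

# Decode only a couple of frames at a time in the VAE to further reduce memory
frames = pipeline(image, decode_chunk_size=2).frames[0]
export_to_video(frames, "generated.mp4", fps=7)
```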
## StableVideoDiffusionPipeline

[[autodoc]] StableVideoDiffusionPipeline

## StableVideoDiffusionPipelineOutput

[[autodoc]] pipelines.stable_video_diffusion.StableVideoDiffusionPipelineOutput
@@ -167,6 +167,12 @@ Here are some sample outputs:
</tr>
</table>

## Tips

Video generation is memory-intensive, and one way to reduce your memory usage is to set `enable_forward_chunking` on the pipeline's UNet so you don't run the entire feedforward layer at once. Breaking it up into chunks in a loop is more memory-efficient.

Check out the [Text or image-to-video](text-img2vid) guide for more details about how certain parameters can affect video generation and how to optimize inference by reducing memory usage.

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.

@@ -245,7 +245,7 @@ Generating accurate faces is challenging because they are complex and nuanced. D
* [ip-adapter-full-face_sd15.safetensors](https://huggingface.co/h94/IP-Adapter/blob/main/models/ip-adapter-full-face_sd15.safetensors) is conditioned with images of cropped faces and removed backgrounds
* [ip-adapter-plus-face_sd15.safetensors](https://huggingface.co/h94/IP-Adapter/blob/main/models/ip-adapter-plus-face_sd15.safetensors) uses patch embeddings and is conditioned with images of cropped faces

> [!TIP]
> [IP-Adapter-FaceID](https://huggingface.co/h94/IP-Adapter-FaceID) is a face-specific IP-Adapter trained with face ID embeddings instead of CLIP image embeddings, allowing you to generate more consistent faces in different contexts and styles. Try out this popular [community pipeline](https://github.com/huggingface/diffusers/tree/main/examples/community#ip-adapter-face-id) and see how it compares to the other face IP-Adapters.

For face models, use the [h94/IP-Adapter](https://huggingface.co/h94/IP-Adapter) checkpoint. It is also recommended to use [`DDIMScheduler`] or [`EulerDiscreteScheduler`] for face models.
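A minimal sketch of that recommendation (the weight name comes from the list above; swapping in [`DDIMScheduler`] is the suggested configuration, not the only valid one):

```py
import torch
from diffusers import AutoPipelineForText2Image, DDIMScheduler

pipeline = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)

# Load one of the face-specific IP-Adapter weights from the h94/IP-Adapter repository
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-full-face_sd15.safetensors")
pipeline.set_ip_adapter_scale(0.6)
```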
@@ -468,3 +468,83 @@ image
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ipa-controlnet-out.png" />
|
||||
</div>
|
||||
|
||||
### IP-Adapter masking
|
||||
|
||||
Binary masks can be used to specify which portion of the output image should be assigned to an IP-Adapter.
|
||||
For each input IP-Adapter image, a binary mask and an IP-Adapter must be provided.
|
||||
|
||||
Before passing the masks to the pipeline, it's essential to preprocess them using [`IPAdapterMaskProcessor.preprocess()`].
|
||||
|
||||
> [!TIP]
|
||||
> For optimal results, provide the output height and width to [`IPAdapterMaskProcessor.preprocess()`]. This ensures that masks with differing aspect ratios are appropriately stretched. If the input masks already match the aspect ratio of the generated image, specifying height and width can be omitted.
|
||||
|
||||
Here is an example with two masks:
|
||||
|
||||
```py
|
||||
from diffusers.image_processor import IPAdapterMaskProcessor
from diffusers.utils import load_image
|
||||
|
||||
mask1 = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask1.png")
|
||||
mask2 = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask2.png")
|
||||
|
||||
output_height = 1024
|
||||
output_width = 1024
|
||||
|
||||
processor = IPAdapterMaskProcessor()
|
||||
masks = processor.preprocess([mask1, mask2], height=output_height, width=output_width)
|
||||
```
|
||||
|
||||
<div class="flex flex-row gap-4">
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask1.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">mask one</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask2.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">mask two</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
If you have more than one IP-Adapter image, load them into a list, ensuring each image is assigned to a different IP-Adapter.
|
||||
|
||||
```py
|
||||
face_image1 = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl1.png")
|
||||
face_image2 = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl2.png")
|
||||
|
||||
ip_images = [[face_image1], [face_image2]]
|
||||
|
||||
```
|
||||
|
||||
<div class="flex flex-row gap-4">
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl1.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">ip adapter image one</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl2.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">ip adapter image two</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
Pass preprocessed masks to the pipeline using `cross_attention_kwargs` as shown below:
|
||||
|
||||
```py
|
||||
|
||||
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name=["ip-adapter-plus-face_sdxl_vit-h.safetensors"] * 2)
|
||||
pipeline.set_ip_adapter_scale([0.7] * 2)
|
||||
generator = torch.Generator(device="cpu").manual_seed(0)
|
||||
num_images=1
|
||||
|
||||
image = pipeline(
|
||||
prompt="2 girls",
|
||||
ip_adapter_image=ip_images,
|
||||
negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
|
||||
num_inference_steps=20, num_images_per_prompt=num_images,
|
||||
generator=generator, cross_attention_kwargs={"ip_adapter_masks": masks}
|
||||
).images[0]
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_attention_mask_result_seed_0.png" />
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">output image</figcaption>
|
||||
</div>
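The generation snippet above assumes a pipeline and image encoder created earlier in the guide. A minimal sketch of that setup (the SDXL and image-encoder IDs follow the usual IP-Adapter examples and are assumptions here):

```py
import torch
from diffusers import AutoPipelineForText2Image
from transformers import CLIPVisionModelWithProjection

# ViT-H image encoder used by the "plus" face IP-Adapter weights
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "h94/IP-Adapter", subfolder="models/image_encoder", torch_dtype=torch.float16
)
pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, torch_dtype=torch.float16
).to("cuda")
```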
|
||||
|
||||
docs/source/en/using-diffusers/text-img2vid.md (new file, 497 lines)
@@ -0,0 +1,497 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Text or image-to-video
|
||||
|
||||
Driven by the success of text-to-image diffusion models, generative video models are able to generate short clips of video from a text prompt or an initial image. These models extend a pretrained diffusion model to generate videos by adding some type of temporal and/or spatial convolution layer to the architecture. A mixed dataset of images and videos are used to train the model which learns to output a series of video frames based on the text or image conditioning.
|
||||
|
||||
This guide will show you how to generate videos, how to configure video model parameters, and how to control video generation.
|
||||
|
||||
## Popular models
|
||||
|
||||
> [!TIP]
|
||||
> Discover other cool and trending video generation models on the Hub [here](https://huggingface.co/models?pipeline_tag=text-to-video&sort=trending)!
|
||||
|
||||
[Stable Video Diffusion (SVD)](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid), [I2VGen-XL](https://huggingface.co/ali-vilab/i2vgen-xl/), [AnimateDiff](https://huggingface.co/guoyww/animatediff), and [ModelScopeT2V](https://huggingface.co/ali-vilab/text-to-video-ms-1.7b) are popular models used for video diffusion. Each model is distinct. For example, AnimateDiff inserts a motion modeling module into a frozen text-to-image model to generate personalized animated images, whereas SVD is entirely pretrained from scratch with a three-stage training process to generate short high-quality videos.
|
||||
|
||||
### Stable Video Diffusion
|
||||
|
||||
[SVD](../api/pipelines/svd) is based on the Stable Diffusion 2.1 model, and it is trained first on images, then on low-resolution videos, and finally on a smaller dataset of high-resolution videos. This model generates a short 2-4 second video from an initial image. You can learn more details about the model, like micro-conditioning, in the [Stable Video Diffusion](../using-diffusers/svd) guide.
|
||||
|
||||
Begin by loading the [`StableVideoDiffusionPipeline`] and passing an initial image to generate a video from.
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import StableVideoDiffusionPipeline
|
||||
from diffusers.utils import load_image, export_to_video
|
||||
|
||||
pipeline = StableVideoDiffusionPipeline.from_pretrained(
|
||||
"stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
|
||||
)
|
||||
pipeline.enable_model_cpu_offload()
|
||||
|
||||
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png")
|
||||
image = image.resize((1024, 576))
|
||||
|
||||
generator = torch.manual_seed(42)
|
||||
frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]
|
||||
export_to_video(frames, "generated.mp4", fps=7)
|
||||
```
|
||||
|
||||
<div class="flex gap-4">
|
||||
<div>
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption>
|
||||
</div>
|
||||
<div>
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/output_rocket.gif"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">generated video</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
### I2VGen-XL
|
||||
|
||||
[I2VGen-XL](../api/pipelines/i2vgenxl) is a diffusion model that can generate higher resolution videos than SVD and it is also capable of accepting text prompts in addition to images. The model is trained with two hierarchical encoders (detail and global encoder) to better capture low and high-level details in images. These learned details are used to train a video diffusion model which refines the video resolution and details in the generated video.
|
||||
|
||||
You can use I2VGen-XL by loading the [`I2VGenXLPipeline`], and passing a text and image prompt to generate a video.
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import I2VGenXLPipeline
|
||||
from diffusers.utils import export_to_gif, load_image
|
||||
|
||||
pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16")
|
||||
pipeline.enable_model_cpu_offload()
|
||||
|
||||
image_url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/i2vgen_xl_images/img_0009.png"
|
||||
image = load_image(image_url).convert("RGB")
|
||||
|
||||
prompt = "Papers were floating in the air on a table in the library"
|
||||
negative_prompt = "Distorted, discontinuous, Ugly, blurry, low resolution, motionless, static, disfigured, disconnected limbs, Ugly faces, incomplete arms"
|
||||
generator = torch.manual_seed(8888)
|
||||
|
||||
frames = pipeline(
|
||||
prompt=prompt,
|
||||
image=image,
|
||||
num_inference_steps=50,
|
||||
negative_prompt=negative_prompt,
|
||||
guidance_scale=9.0,
|
||||
generator=generator
|
||||
).frames[0]
|
||||
export_to_gif(frames, "i2v.gif")
|
||||
```
|
||||
|
||||
<div class="flex gap-4">
|
||||
<div>
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/i2vgen_xl_images/img_0009.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption>
|
||||
</div>
|
||||
<div>
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/i2vgen-xl-example.gif"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">generated video</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
### AnimateDiff
|
||||
|
||||
[AnimateDiff](../api/pipelines/animatediff) is an adapter model that inserts a motion module into a pretrained diffusion model to animate an image. The adapter is trained on video clips to learn motion which is used to condition the generation process to create a video. It is faster and easier to only train the adapter and it can be loaded into most diffusion models, effectively turning them into "video models".
|
||||
|
||||
Start by loading a [`MotionAdapter`].
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
|
||||
from diffusers.utils import export_to_gif
|
||||
|
||||
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
|
||||
```
|
||||
|
||||
Then load a finetuned Stable Diffusion model with the [`AnimateDiffPipeline`].
|
||||
|
||||
```py
|
||||
pipeline = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=adapter, torch_dtype=torch.float16)
|
||||
scheduler = DDIMScheduler.from_pretrained(
|
||||
"emilianJR/epiCRealism",
|
||||
subfolder="scheduler",
|
||||
clip_sample=False,
|
||||
timestep_spacing="linspace",
|
||||
beta_schedule="linear",
|
||||
steps_offset=1,
|
||||
)
|
||||
pipeline.scheduler = scheduler
|
||||
pipeline.enable_vae_slicing()
|
||||
pipeline.enable_model_cpu_offload()
|
||||
```
|
||||
|
||||
Create a prompt and generate the video.
|
||||
|
||||
```py
|
||||
output = pipeline(
|
||||
prompt="A space rocket with trails of smoke behind it launching into space from the desert, 4k, high resolution",
|
||||
negative_prompt="bad quality, worse quality, low resolution",
|
||||
num_frames=16,
|
||||
guidance_scale=7.5,
|
||||
num_inference_steps=50,
|
||||
generator=torch.Generator("cpu").manual_seed(49),
|
||||
)
|
||||
frames = output.frames[0]
|
||||
export_to_gif(frames, "animation.gif")
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff.gif"/>
|
||||
</div>
|
||||
|
||||
### ModelscopeT2V
|
||||
|
||||
[ModelscopeT2V](../api/pipelines/text_to_video) adds spatial and temporal convolutions and attention to a UNet, and it is trained on image-text and video-text datasets to enhance what it learns during training. The model takes a prompt and encodes it into text embeddings, which condition the UNet as it denoises the latents; the denoised latents are then decoded by a VQGAN into a video.
|
||||
|
||||
<Tip>
|
||||
|
||||
ModelScopeT2V generates watermarked videos due to the datasets it was trained on. To use a watermark-free model, try the [cerspense/zeroscope_v2_576w](https://huggingface.co/cerspense/zeroscope_v2_576w) model with the [`TextToVideoSDPipeline`] first, and then upscale its output with the [cerspense/zeroscope_v2_XL](https://huggingface.co/cerspense/zeroscope_v2_XL) checkpoint using the [`VideoToVideoSDPipeline`].
|
||||
|
||||
</Tip>
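If you want to try the watermark-free route mentioned in the tip, a minimal sketch might look like this (the checkpoint ID is the one linked above; the prompt and parameters are illustrative):

```py
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

pipeline = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
pipeline.enable_model_cpu_offload()
pipeline.enable_vae_slicing()

prompt = "Confident teddy bear surfer rides the wave in the tropics"
video_frames = pipeline(prompt, num_frames=24).frames[0]
export_to_video(video_frames, "zeroscope_576w.mp4", fps=10)
```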
|
||||
|
||||
Load a ModelScopeT2V checkpoint into the [`DiffusionPipeline`] along with a prompt to generate a video.
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import DiffusionPipeline
|
||||
from diffusers.utils import export_to_video
|
||||
|
||||
pipeline = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
|
||||
pipeline.enable_model_cpu_offload()
|
||||
pipeline.enable_vae_slicing()
|
||||
|
||||
prompt = "Confident teddy bear surfer rides the wave in the tropics"
|
||||
video_frames = pipeline(prompt).frames[0]
|
||||
export_to_video(video_frames, "modelscopet2v.mp4", fps=10)
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/modelscopet2v.gif" />
|
||||
</div>
|
||||
|
||||
## Configure model parameters
|
||||
|
||||
There are a few important parameters you can configure in the pipeline that'll affect the video generation process and quality. Let's take a closer look at what these parameters do and how changing them affects the output.
|
||||
|
||||
### Number of frames
|
||||
|
||||
The `num_frames` parameter determines how many video frames are generated. A frame is a single image that is played in a sequence with other frames to create motion or a video. This affects video length because the video is played back at a fixed number of frames per second (check a pipeline's API reference for the default value). To increase the video duration, you'll need to increase the `num_frames` parameter.
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import StableVideoDiffusionPipeline
|
||||
from diffusers.utils import load_image, export_to_video
|
||||
|
||||
pipeline = StableVideoDiffusionPipeline.from_pretrained(
|
||||
"stabilityai/stable-video-diffusion-img2vid", torch_dtype=torch.float16, variant="fp16"
|
||||
)
|
||||
pipeline.enable_model_cpu_offload()
|
||||
|
||||
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png")
|
||||
image = image.resize((1024, 576))
|
||||
|
||||
generator = torch.manual_seed(42)
|
||||
frames = pipeline(image, decode_chunk_size=8, generator=generator, num_frames=25).frames[0]
|
||||
export_to_video(frames, "generated.mp4", fps=7)
|
||||
```
|
||||
|
||||
<div class="flex gap-4">
|
||||
<div>
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/num_frames_14.gif"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">num_frames=14</figcaption>
|
||||
</div>
|
||||
<div>
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/num_frames_25.gif"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">num_frames=25</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
### Guidance scale
|
||||
|
||||
The `guidance_scale` parameter controls how closely the generated video is aligned with the text prompt or initial image. A higher `guidance_scale` value means your generated video is more aligned with the conditioning input, while a lower value means it is less aligned, which gives the model more "creativity" to interpret the input.
|
||||
|
||||
<Tip>
|
||||
|
||||
SVD uses the `min_guidance_scale` and `max_guidance_scale` parameters for applying guidance to the first and last frames respectively.
|
||||
|
||||
</Tip>
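For SVD, that looks something like the sketch below (reusing the `pipeline`, `image`, and `generator` from the earlier Stable Video Diffusion example; the values shown are the pipeline defaults):

```py
# Guidance ramps linearly from min_guidance_scale (first frame) to max_guidance_scale (last frame)
frames = pipeline(
    image,
    min_guidance_scale=1.0,
    max_guidance_scale=3.0,
    decode_chunk_size=8,
    generator=generator,
).frames[0]
```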
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import I2VGenXLPipeline
|
||||
from diffusers.utils import export_to_gif, load_image
|
||||
|
||||
pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16")
|
||||
pipeline.enable_model_cpu_offload()
|
||||
|
||||
image_url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/i2vgen_xl_images/img_0009.png"
|
||||
image = load_image(image_url).convert("RGB")
|
||||
|
||||
prompt = "Papers were floating in the air on a table in the library"
|
||||
negative_prompt = "Distorted, discontinuous, Ugly, blurry, low resolution, motionless, static, disfigured, disconnected limbs, Ugly faces, incomplete arms"
|
||||
generator = torch.manual_seed(0)
|
||||
|
||||
frames = pipeline(
|
||||
prompt=prompt,
|
||||
image=image,
|
||||
num_inference_steps=50,
|
||||
negative_prompt=negative_prompt,
|
||||
guidance_scale=1.0,
|
||||
generator=generator
|
||||
).frames[0]
|
||||
export_to_gif(frames, "i2v.gif")
|
||||
```
|
||||
|
||||
<div class="flex gap-4">
|
||||
<div>
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/i2vgen-xl-example.gif"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale=9.0</figcaption>
|
||||
</div>
|
||||
<div>
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guidance_scale_1.0.gif"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale=1.0</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
### Negative prompt
|
||||
|
||||
A negative prompt deters the model from generating things you don’t want it to. This parameter is commonly used to improve overall generation quality by removing poor or bad features such as “low resolution” or “bad details”.
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
|
||||
from diffusers.utils import export_to_gif
|
||||
|
||||
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
|
||||
|
||||
pipeline = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=adapter, torch_dtype=torch.float16)
|
||||
scheduler = DDIMScheduler.from_pretrained(
|
||||
"emilianJR/epiCRealism",
|
||||
subfolder="scheduler",
|
||||
clip_sample=False,
|
||||
timestep_spacing="linspace",
|
||||
beta_schedule="linear",
|
||||
steps_offset=1,
|
||||
)
|
||||
pipeline.scheduler = scheduler
|
||||
pipeline.enable_vae_slicing()
|
||||
pipeline.enable_model_cpu_offload()
|
||||
|
||||
output = pipeline(
|
||||
prompt="360 camera shot of a sushi roll in a restaurant",
|
||||
negative_prompt="Distorted, discontinuous, ugly, blurry, low resolution, motionless, static",
|
||||
num_frames=16,
|
||||
guidance_scale=7.5,
|
||||
num_inference_steps=50,
|
||||
generator=torch.Generator("cpu").manual_seed(0),
|
||||
)
|
||||
frames = output.frames[0]
|
||||
export_to_gif(frames, "animation.gif")
|
||||
```
|
||||
|
||||
<div class="flex gap-4">
|
||||
<div>
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff_no_neg.gif"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">no negative prompt</figcaption>
|
||||
</div>
|
||||
<div>
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff_neg.gif"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">negative prompt applied</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
### Model-specific parameters
|
||||
|
||||
There are some pipeline parameters that are unique to each model such as adjusting the motion in a video or adding noise to the initial image.
|
||||
|
||||
<hfoptions id="special-parameters">
|
||||
<hfoption id="Stable Video Diffusion">
|
||||
|
||||
Stable Video Diffusion provides additional micro-conditioning for the frame rate with the `fps` parameter and for motion with the `motion_bucket_id` parameter. Together, these parameters allow for adjusting the amount of motion in the generated video.
|
||||
|
||||
There is also a `noise_aug_strength` parameter that increases the amount of noise added to the initial image. Varying this parameter affects how similar the generated video and initial image are. A higher `noise_aug_strength` also increases the amount of motion. To learn more, read the [Micro-conditioning](../using-diffusers/svd#micro-conditioning) guide.
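A sketch of these micro-conditioning parameters on the SVD pipeline from earlier (the values are illustrative; `fps=7`, `motion_bucket_id=127`, and `noise_aug_strength=0.02` are the defaults):

```py
frames = pipeline(
    image,
    fps=7,                   # conditions generation on the target frame rate
    motion_bucket_id=180,    # higher values add more motion
    noise_aug_strength=0.1,  # more noise means less resemblance to the initial image and more motion
    decode_chunk_size=8,
    generator=generator,
).frames[0]
```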
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Text2Video-Zero">
|
||||
|
||||
Text2Video-Zero computes the amount of motion to apply to each frame from randomly sampled latents. You can use the `motion_field_strength_x` and `motion_field_strength_y` parameters to control the amount of motion to apply to the x and y-axes of the video. The parameters `t0` and `t1` are the timesteps to apply motion to the latents.
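A sketch with [`TextToVideoZeroPipeline`] (the values shown are the pipeline defaults; treat the model ID as an example):

```py
import torch
from diffusers import TextToVideoZeroPipeline

pipeline = TextToVideoZeroPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

result = pipeline(
    prompt="A panda is playing guitar on times square",
    motion_field_strength_x=12,  # motion applied along the x-axis
    motion_field_strength_y=12,  # motion applied along the y-axis
    t0=44,                       # first timestep of the motion window
    t1=47,                       # last timestep of the motion window
).images
```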
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## Control video generation
|
||||
|
||||
Video generation can be controlled similarly to how text-to-image, image-to-image, and inpainting are controlled with a [`ControlNetModel`]. The only difference is you need to use the [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor`] so each frame attends to the first frame.
|
||||
|
||||
### Text2Video-Zero
|
||||
|
||||
Text2Video-Zero video generation can be conditioned on pose and edge images for even greater control over a subject's motion in the generated video or to preserve the identity of a subject/object in the video. You can also use Text2Video-Zero with [InstructPix2Pix](../api/pipelines/pix2pix) for editing videos with text.
|
||||
|
||||
<hfoptions id="t2v-zero">
|
||||
<hfoption id="pose control">
|
||||
|
||||
Start by downloading a video and extracting the pose images from it.
|
||||
|
||||
```py
|
||||
from huggingface_hub import hf_hub_download
|
||||
from PIL import Image
|
||||
import imageio
|
||||
|
||||
filename = "__assets__/poses_skeleton_gifs/dance1_corr.mp4"
|
||||
repo_id = "PAIR/Text2Video-Zero"
|
||||
video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename)
|
||||
|
||||
reader = imageio.get_reader(video_path, "ffmpeg")
|
||||
frame_count = 8
|
||||
pose_images = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)]
|
||||
```
|
||||
|
||||
Load a [`ControlNetModel`] for pose estimation and a checkpoint into the [`StableDiffusionControlNetPipeline`]. Then you'll use the [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor`] for the UNet and ControlNet.
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
|
||||
from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor
|
||||
|
||||
model_id = "runwayml/stable-diffusion-v1-5"
|
||||
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16)
|
||||
pipeline = StableDiffusionControlNetPipeline.from_pretrained(
|
||||
model_id, controlnet=controlnet, torch_dtype=torch.float16
|
||||
).to("cuda")
|
||||
|
||||
pipeline.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))
|
||||
pipeline.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))
|
||||
```
|
||||
|
||||
Fix the latents for all the frames, and then pass your prompt and extracted pose images to the model to generate a video.
|
||||
|
||||
```py
|
||||
latents = torch.randn((1, 4, 64, 64), device="cuda", dtype=torch.float16).repeat(len(pose_images), 1, 1, 1)
|
||||
|
||||
prompt = "Darth Vader dancing in a desert"
|
||||
result = pipeline(prompt=[prompt] * len(pose_images), image=pose_images, latents=latents).images
|
||||
imageio.mimsave("video.mp4", result, fps=4)
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="edge control">
|
||||
|
||||
Download a video and extract the edges from it.
|
||||
|
||||
```py
|
||||
from huggingface_hub import hf_hub_download
|
||||
from PIL import Image
|
||||
import imageio
|
||||
|
||||
filename = "__assets__/poses_skeleton_gifs/dance1_corr.mp4"
|
||||
repo_id = "PAIR/Text2Video-Zero"
|
||||
video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename)
|
||||
|
||||
reader = imageio.get_reader(video_path, "ffmpeg")
|
||||
frame_count = 8
|
||||
pose_images = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)]
|
||||
```
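The snippet above only reads raw frames; to actually condition on edges, you can run an edge detector over each frame before passing them to the ControlNet. A minimal sketch with OpenCV's Canny filter (the thresholds are illustrative, and `opencv-python` plus `numpy` are assumed to be installed):

```py
import cv2
import numpy as np

# Detect edges in each frame and convert back to 3-channel PIL images for the canny ControlNet
edge_images = []
for frame in pose_images:
    edges = cv2.Canny(np.array(frame), 100, 200)
    edge_images.append(Image.fromarray(np.stack([edges] * 3, axis=-1)))
```

If you build `edge_images` this way, pass them as `image=edge_images` in the generation call below instead of the raw frames.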
|
||||
|
||||
Load a [`ControlNetModel`] for canny edge and a checkpoint into the [`StableDiffusionControlNetPipeline`]. Then you'll use the [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor`] for the UNet and ControlNet.
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
|
||||
from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor
|
||||
|
||||
model_id = "runwayml/stable-diffusion-v1-5"
|
||||
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
|
||||
pipeline = StableDiffusionControlNetPipeline.from_pretrained(
|
||||
model_id, controlnet=controlnet, torch_dtype=torch.float16
|
||||
).to("cuda")
|
||||
|
||||
pipeline.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))
|
||||
pipeline.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))
|
||||
```
|
||||
|
||||
Fix the latents for all the frames, and then pass your prompt and extracted edge images to the model to generate a video.
|
||||
|
||||
```py
|
||||
latents = torch.randn((1, 4, 64, 64), device="cuda", dtype=torch.float16).repeat(len(pose_images), 1, 1, 1)
|
||||
|
||||
prompt = "Darth Vader dancing in a desert"
|
||||
result = pipeline(prompt=[prompt] * len(pose_images), image=pose_images, latents=latents).images
|
||||
imageio.mimsave("video.mp4", result, fps=4)
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="InstructPix2Pix">
|
||||
|
||||
InstructPix2Pix allows you to use text to describe the changes you want to make to the video. Start by downloading and reading a video.
|
||||
|
||||
```py
|
||||
from huggingface_hub import hf_hub_download
|
||||
from PIL import Image
|
||||
import imageio
|
||||
|
||||
filename = "__assets__/pix2pix video/camel.mp4"
|
||||
repo_id = "PAIR/Text2Video-Zero"
|
||||
video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename)
|
||||
|
||||
reader = imageio.get_reader(video_path, "ffmpeg")
|
||||
frame_count = 8
|
||||
video = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)]
|
||||
```
|
||||
|
||||
Load the [`StableDiffusionInstructPix2PixPipeline`] and set the [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor`] for the UNet.
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import StableDiffusionInstructPix2PixPipeline
|
||||
from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor
|
||||
|
||||
pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", torch_dtype=torch.float16).to("cuda")
|
||||
pipeline.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=3))
|
||||
```
|
||||
|
||||
Pass a prompt describing the change you want to apply to the video.
|
||||
|
||||
```py
|
||||
prompt = "make it Van Gogh Starry Night style"
|
||||
result = pipeline(prompt=[prompt] * len(video), image=video).images
|
||||
imageio.mimsave("edited_video.mp4", result, fps=4)
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## Optimize
|
||||
|
||||
Video generation requires a lot of memory because you're generating many video frames at once. You can reduce your memory requirements at the expense of some inference speed. Try:

1. offloading pipeline components to the CPU once they're no longer needed
2. enabling feed-forward chunking so the feed-forward layer runs in a loop instead of all at once
3. breaking up the number of frames the VAE has to decode into smaller chunks instead of decoding them all at once
|
||||
|
||||
```diff
|
||||
- pipeline.enable_model_cpu_offload()
|
||||
- frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]
|
||||
+ pipeline.enable_model_cpu_offload()
|
||||
+ pipeline.unet.enable_forward_chunking()
|
||||
+ frames = pipeline(image, decode_chunk_size=2, generator=generator, num_frames=25).frames[0]
|
||||
```
|
||||
|
||||
If memory is not an issue and you want to optimize for speed, try wrapping the UNet with [`torch.compile`](../optimization/torch2.0#torchcompile).
|
||||
|
||||
```diff
|
||||
- pipeline.enable_model_cpu_offload()
|
||||
+ pipeline.to("cuda")
|
||||
+ pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)
|
||||
```
|
||||
@@ -53,6 +53,7 @@ from diffusers import (
|
||||
)
|
||||
from diffusers.optimization import get_scheduler
|
||||
from diffusers.utils import check_min_version, is_wandb_available
|
||||
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
|
||||
from diffusers.utils.import_utils import is_xformers_available
|
||||
|
||||
|
||||
@@ -84,32 +85,30 @@ check_min_version("0.27.0.dev0")
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
def save_model_card(repo_id: str, images=None, base_model=str, repo_folder=None):
|
||||
def save_model_card(repo_id: str, images: list = None, base_model: str = None, repo_folder: str = None):
|
||||
img_str = ""
|
||||
for i, image in enumerate(images):
|
||||
image.save(os.path.join(repo_folder, f"image_{i}.png"))
|
||||
img_str += f"\n"
|
||||
|
||||
yaml = f"""
|
||||
---
|
||||
license: creativeml-openrail-m
|
||||
base_model: {base_model}
|
||||
tags:
|
||||
- stable-diffusion
|
||||
- stable-diffusion-diffusers
|
||||
- text-to-image
|
||||
- diffusers
|
||||
- textual_inversion
|
||||
inference: true
|
||||
---
|
||||
"""
|
||||
model_card = f"""
|
||||
if images is not None:
|
||||
for i, image in enumerate(images):
|
||||
image.save(os.path.join(repo_folder, f"image_{i}.png"))
|
||||
img_str += f"\n"
|
||||
model_description = f"""
|
||||
# Textual inversion text2image fine-tuning - {repo_id}
|
||||
These are textual inversion adaption weights for {base_model}. You can find some example images in the following. \n
|
||||
{img_str}
|
||||
"""
|
||||
with open(os.path.join(repo_folder, "README.md"), "w") as f:
|
||||
f.write(yaml + model_card)
|
||||
model_card = load_or_create_model_card(
|
||||
repo_id_or_path=repo_id,
|
||||
from_training=True,
|
||||
license="creativeml-openrail-m",
|
||||
base_model=base_model,
|
||||
model_description=model_description,
|
||||
inference=True,
|
||||
)
|
||||
|
||||
tags = ["stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "diffusers", "textual_inversion"]
|
||||
model_card = populate_model_card(model_card, tags=tags)
|
||||
|
||||
model_card.save(os.path.join(repo_folder, "README.md"))
|
||||
|
||||
|
||||
def log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch):
|
||||
|
||||
@@ -32,8 +32,6 @@ from accelerate import Accelerator
|
||||
from accelerate.logging import get_logger
|
||||
from accelerate.utils import ProjectConfiguration, set_seed
|
||||
from huggingface_hub import create_repo, upload_folder
|
||||
|
||||
# TODO: remove and import from diffusers.utils when the new version of diffusers is released
|
||||
from packaging import version
|
||||
from PIL import Image
|
||||
from torch.utils.data import Dataset
|
||||
@@ -51,6 +49,7 @@ from diffusers import (
|
||||
)
|
||||
from diffusers.optimization import get_scheduler
|
||||
from diffusers.utils import check_min_version, is_wandb_available
|
||||
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
|
||||
from diffusers.utils.import_utils import is_xformers_available
|
||||
|
||||
|
||||
@@ -88,26 +87,31 @@ def save_model_card(repo_id: str, images=None, base_model=str, repo_folder=None)
|
||||
image.save(os.path.join(repo_folder, f"image_{i}.png"))
|
||||
img_str += f"\n"
|
||||
|
||||
yaml = f"""
|
||||
---
|
||||
license: creativeml-openrail-m
|
||||
base_model: {base_model}
|
||||
tags:
|
||||
- stable-diffusion
|
||||
- stable-diffusion-diffusers
|
||||
- text-to-image
|
||||
- diffusers
|
||||
- textual_inversion
|
||||
inference: true
|
||||
---
|
||||
"""
|
||||
model_card = f"""
|
||||
model_description = f"""
|
||||
# Textual inversion text2image fine-tuning - {repo_id}
|
||||
These are textual inversion adaption weights for {base_model}. You can find some example images in the following. \n
|
||||
{img_str}
|
||||
"""
|
||||
with open(os.path.join(repo_folder, "README.md"), "w") as f:
|
||||
f.write(yaml + model_card)
|
||||
model_card = load_or_create_model_card(
|
||||
repo_id_or_path=repo_id,
|
||||
from_training=True,
|
||||
license="creativeml-openrail-m",
|
||||
base_model=base_model,
|
||||
model_description=model_description,
|
||||
inference=True,
|
||||
)
|
||||
|
||||
tags = [
|
||||
"stable-diffusion-xl",
|
||||
"stable-diffusion-xl-diffusers",
|
||||
"text-to-image",
|
||||
"diffusers",
|
||||
"textual_inversion",
|
||||
]
|
||||
|
||||
model_card = populate_model_card(model_card, tags=tags)
|
||||
|
||||
model_card.save(os.path.join(repo_folder, "README.md"))
|
||||
|
||||
|
||||
def log_validation(
|
||||
|
||||
@@ -12,12 +12,14 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import math
|
||||
import warnings
|
||||
from typing import List, Optional, Tuple, Union
|
||||
|
||||
import numpy as np
|
||||
import PIL.Image
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
from PIL import Image, ImageFilter, ImageOps
|
||||
|
||||
from .configuration_utils import ConfigMixin, register_to_config
|
||||
@@ -882,3 +884,107 @@ class VaeImageProcessorLDM3D(VaeImageProcessor):
|
||||
depth = self.binarize(depth)
|
||||
|
||||
return rgb, depth
|
||||
|
||||
|
||||
class IPAdapterMaskProcessor(VaeImageProcessor):
|
||||
"""
|
||||
Image processor for IP Adapter image masks.
|
||||
|
||||
Args:
|
||||
do_resize (`bool`, *optional*, defaults to `True`):
|
||||
Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`.
|
||||
vae_scale_factor (`int`, *optional*, defaults to `8`):
|
||||
VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor.
|
||||
resample (`str`, *optional*, defaults to `lanczos`):
|
||||
Resampling filter to use when resizing the image.
|
||||
do_normalize (`bool`, *optional*, defaults to `False`):
|
||||
Whether to normalize the image to [-1,1].
|
||||
do_binarize (`bool`, *optional*, defaults to `True`):
|
||||
Whether to binarize the image to 0/1.
|
||||
do_convert_grayscale (`bool`, *optional*, defaults to `True`):
|
||||
Whether to convert the images to grayscale format.
|
||||
|
||||
"""
|
||||
|
||||
config_name = CONFIG_NAME
|
||||
|
||||
@register_to_config
|
||||
def __init__(
|
||||
self,
|
||||
do_resize: bool = True,
|
||||
vae_scale_factor: int = 8,
|
||||
resample: str = "lanczos",
|
||||
do_normalize: bool = False,
|
||||
do_binarize: bool = True,
|
||||
do_convert_grayscale: bool = True,
|
||||
):
|
||||
super().__init__(
|
||||
do_resize=do_resize,
|
||||
vae_scale_factor=vae_scale_factor,
|
||||
resample=resample,
|
||||
do_normalize=do_normalize,
|
||||
do_binarize=do_binarize,
|
||||
do_convert_grayscale=do_convert_grayscale,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def downsample(mask: torch.FloatTensor, batch_size: int, num_queries: int, value_embed_dim: int):
|
||||
"""
|
||||
Downsamples the provided mask tensor to match the expected dimensions for scaled dot-product attention.
|
||||
If the aspect ratio of the mask does not match the aspect ratio of the output image, a warning is issued.
|
||||
|
||||
Args:
|
||||
mask (`torch.FloatTensor`):
|
||||
The input mask tensor generated with `IPAdapterMaskProcessor.preprocess()`.
|
||||
batch_size (`int`):
|
||||
The batch size.
|
||||
num_queries (`int`):
|
||||
The number of queries.
|
||||
value_embed_dim (`int`):
|
||||
The dimensionality of the value embeddings.
|
||||
|
||||
Returns:
|
||||
`torch.FloatTensor`:
|
||||
The downsampled mask tensor.
|
||||
|
||||
"""
|
||||
o_h = mask.shape[1]
|
||||
o_w = mask.shape[2]
|
||||
ratio = o_w / o_h
|
||||
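# infer a (mask_h, mask_w) grid that keeps the mask's aspect ratio and covers roughly num_queries tokens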
mask_h = int(math.sqrt(num_queries / ratio))
|
||||
mask_h = int(mask_h) + int((num_queries % int(mask_h)) != 0)
|
||||
mask_w = num_queries // mask_h
|
||||
|
||||
mask_downsample = F.interpolate(mask.unsqueeze(0), size=(mask_h, mask_w), mode="bicubic").squeeze(0)
|
||||
|
||||
# Repeat batch_size times
|
||||
if mask_downsample.shape[0] < batch_size:
|
||||
mask_downsample = mask_downsample.repeat(batch_size, 1, 1)
|
||||
|
||||
mask_downsample = mask_downsample.view(mask_downsample.shape[0], -1)
|
||||
|
||||
downsampled_area = mask_h * mask_w
|
||||
# If the output image and the mask do not have the same aspect ratio, tensor shapes will not match
|
||||
# Pad tensor if downsampled_mask.shape[1] is smaller than num_queries
|
||||
if downsampled_area < num_queries:
|
||||
warnings.warn(
|
||||
"The aspect ratio of the mask does not match the aspect ratio of the output image. "
|
||||
"Please update your masks or adjust the output size for optimal performance.",
|
||||
UserWarning,
|
||||
)
|
||||
mask_downsample = F.pad(mask_downsample, (0, num_queries - mask_downsample.shape[1]), value=0.0)
|
||||
# Discard last embeddings if downsampled_mask.shape[1] is bigger than num_queries
|
||||
if downsampled_area > num_queries:
|
||||
warnings.warn(
|
||||
"The aspect ratio of the mask does not match the aspect ratio of the output image. "
|
||||
"Please update your masks or adjust the output size for optimal performance.",
|
||||
UserWarning,
|
||||
)
|
||||
mask_downsample = mask_downsample[:, :num_queries]
|
||||
|
||||
# Repeat last dimension to match SDPA output shape
|
||||
mask_downsample = mask_downsample.view(mask_downsample.shape[0], mask_downsample.shape[1], 1).repeat(
|
||||
1, 1, value_embed_dim
|
||||
)
|
||||
|
||||
return mask_downsample
|
||||
|
||||
@@ -138,7 +138,12 @@ class FromOriginalVAEMixin:
|
||||
image_size = kwargs.pop("image_size", None)
|
||||
scaling_factor = kwargs.pop("scaling_factor", None)
|
||||
component = create_diffusers_vae_model_from_ldm(
|
||||
class_name, original_config, checkpoint, image_size=image_size, scaling_factor=scaling_factor
|
||||
class_name,
|
||||
original_config,
|
||||
checkpoint,
|
||||
image_size=image_size,
|
||||
scaling_factor=scaling_factor,
|
||||
torch_dtype=torch_dtype,
|
||||
)
|
||||
vae = component["vae"]
|
||||
if torch_dtype is not None:
|
||||
|
||||
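The hunks in this part of the comparison thread `torch_dtype` through the single-file conversion helpers. For context, a sketch of the user-facing call path this affects (the checkpoint paths are illustrative):

```py
import torch
from diffusers import AutoencoderKL, StableDiffusionPipeline

# Loading a single-file checkpoint in half precision now passes torch_dtype
# down into the conversion helpers for each sub-model.
pipe = StableDiffusionPipeline.from_single_file("path/to/model.safetensors", torch_dtype=torch.float16)
vae = AutoencoderKL.from_single_file("path/to/vae.safetensors", torch_dtype=torch.float16)
```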
@@ -128,7 +128,12 @@ class FromOriginalControlNetMixin:
|
||||
image_size = kwargs.pop("image_size", None)
|
||||
|
||||
component = create_diffusers_controlnet_model_from_ldm(
|
||||
class_name, original_config, checkpoint, upcast_attention=upcast_attention, image_size=image_size
|
||||
class_name,
|
||||
original_config,
|
||||
checkpoint,
|
||||
upcast_attention=upcast_attention,
|
||||
image_size=image_size,
|
||||
torch_dtype=torch_dtype,
|
||||
)
|
||||
controlnet = component["controlnet"]
|
||||
if torch_dtype is not None:
|
||||
|
||||
@@ -57,14 +57,19 @@ def build_sub_model_components(
|
||||
if component_name == "unet":
|
||||
num_in_channels = kwargs.pop("num_in_channels", None)
|
||||
unet_components = create_diffusers_unet_model_from_ldm(
|
||||
pipeline_class_name, original_config, checkpoint, num_in_channels=num_in_channels, image_size=image_size
|
||||
pipeline_class_name,
|
||||
original_config,
|
||||
checkpoint,
|
||||
num_in_channels=num_in_channels,
|
||||
image_size=image_size,
|
||||
torch_dtype=torch_dtype,
|
||||
)
|
||||
return unet_components
|
||||
|
||||
if component_name == "vae":
|
||||
scaling_factor = kwargs.get("scaling_factor", None)
|
||||
vae_components = create_diffusers_vae_model_from_ldm(
|
||||
pipeline_class_name, original_config, checkpoint, image_size, scaling_factor
|
||||
pipeline_class_name, original_config, checkpoint, image_size, scaling_factor, torch_dtype
|
||||
)
|
||||
return vae_components
|
||||
|
||||
@@ -89,6 +94,7 @@ def build_sub_model_components(
|
||||
checkpoint,
|
||||
model_type=model_type,
|
||||
local_files_only=local_files_only,
|
||||
torch_dtype=torch_dtype,
|
||||
)
|
||||
return text_encoder_components
|
||||
|
||||
@@ -261,6 +267,7 @@ class FromSingleFileMixin:
|
||||
image_size=image_size,
|
||||
load_safety_checker=load_safety_checker,
|
||||
local_files_only=local_files_only,
|
||||
torch_dtype=torch_dtype,
|
||||
**kwargs,
|
||||
)
|
||||
if not components:
|
||||
|
||||
@@ -856,7 +856,7 @@ def convert_controlnet_checkpoint(
|
||||
|
||||
|
||||
def create_diffusers_controlnet_model_from_ldm(
|
||||
pipeline_class_name, original_config, checkpoint, upcast_attention=False, image_size=None
|
||||
pipeline_class_name, original_config, checkpoint, upcast_attention=False, image_size=None, torch_dtype=None
|
||||
):
|
||||
# import here to avoid circular imports
|
||||
from ..models import ControlNetModel
|
||||
@@ -875,7 +875,9 @@ def create_diffusers_controlnet_model_from_ldm(
|
||||
if is_accelerate_available():
|
||||
from ..models.modeling_utils import load_model_dict_into_meta
|
||||
|
||||
unexpected_keys = load_model_dict_into_meta(controlnet, diffusers_format_controlnet_checkpoint)
|
||||
unexpected_keys = load_model_dict_into_meta(
|
||||
controlnet, diffusers_format_controlnet_checkpoint, torch_dtype=torch_dtype
|
||||
)
|
||||
if controlnet._keys_to_ignore_on_load_unexpected is not None:
|
||||
for pat in controlnet._keys_to_ignore_on_load_unexpected:
|
||||
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
|
||||
@@ -887,6 +889,9 @@ def create_diffusers_controlnet_model_from_ldm(
|
||||
else:
|
||||
controlnet.load_state_dict(diffusers_format_controlnet_checkpoint)
|
||||
|
||||
if torch_dtype is not None:
|
||||
controlnet = controlnet.to(torch_dtype)
|
||||
|
||||
return {"controlnet": controlnet}
|
||||
|
||||
|
||||
@@ -1022,7 +1027,7 @@ def convert_ldm_vae_checkpoint(checkpoint, config):
|
||||
return new_checkpoint
|
||||
|
||||
|
||||
def create_text_encoder_from_ldm_clip_checkpoint(config_name, checkpoint, local_files_only=False):
|
||||
def create_text_encoder_from_ldm_clip_checkpoint(config_name, checkpoint, local_files_only=False, torch_dtype=None):
|
||||
try:
|
||||
config = CLIPTextConfig.from_pretrained(config_name, local_files_only=local_files_only)
|
||||
except Exception:
|
||||
@@ -1048,7 +1053,7 @@ def create_text_encoder_from_ldm_clip_checkpoint(config_name, checkpoint, local_
|
||||
if is_accelerate_available():
|
||||
from ..models.modeling_utils import load_model_dict_into_meta
|
||||
|
||||
unexpected_keys = load_model_dict_into_meta(text_model, text_model_dict)
|
||||
unexpected_keys = load_model_dict_into_meta(text_model, text_model_dict, dtype=torch_dtype)
|
||||
if text_model._keys_to_ignore_on_load_unexpected is not None:
|
||||
for pat in text_model._keys_to_ignore_on_load_unexpected:
|
||||
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
|
||||
@@ -1063,6 +1068,9 @@ def create_text_encoder_from_ldm_clip_checkpoint(config_name, checkpoint, local_
|
||||
|
||||
text_model.load_state_dict(text_model_dict)
|
||||
|
||||
if torch_dtype is not None:
|
||||
text_model = text_model.to(torch_dtype)
|
||||
|
||||
return text_model
|
||||
|
||||
|
||||
@@ -1072,6 +1080,7 @@ def create_text_encoder_from_open_clip_checkpoint(
|
||||
prefix="cond_stage_model.model.",
|
||||
has_projection=False,
|
||||
local_files_only=False,
|
||||
torch_dtype=None,
|
||||
**config_kwargs,
|
||||
):
|
||||
try:
|
||||
@@ -1139,7 +1148,7 @@ def create_text_encoder_from_open_clip_checkpoint(
|
||||
if is_accelerate_available():
|
||||
from ..models.modeling_utils import load_model_dict_into_meta
|
||||
|
||||
unexpected_keys = load_model_dict_into_meta(text_model, text_model_dict)
|
||||
unexpected_keys = load_model_dict_into_meta(text_model, text_model_dict, dtype=torch_dtype)
|
||||
if text_model._keys_to_ignore_on_load_unexpected is not None:
|
||||
for pat in text_model._keys_to_ignore_on_load_unexpected:
|
||||
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
|
||||
@@ -1155,6 +1164,9 @@ def create_text_encoder_from_open_clip_checkpoint(
|
||||
|
||||
text_model.load_state_dict(text_model_dict)
|
||||
|
||||
if torch_dtype is not None:
|
||||
text_model = text_model.to(torch_dtype)
|
||||
|
||||
return text_model
|
||||
|
||||
|
||||
@@ -1166,6 +1178,7 @@ def create_diffusers_unet_model_from_ldm(
|
||||
upcast_attention=False,
|
||||
extract_ema=False,
|
||||
image_size=None,
|
||||
torch_dtype=None,
|
||||
):
|
||||
from ..models import UNet2DConditionModel
|
||||
|
||||
@@ -1198,7 +1211,7 @@ def create_diffusers_unet_model_from_ldm(
|
||||
if is_accelerate_available():
|
||||
from ..models.modeling_utils import load_model_dict_into_meta
|
||||
|
||||
unexpected_keys = load_model_dict_into_meta(unet, diffusers_format_unet_checkpoint)
|
||||
unexpected_keys = load_model_dict_into_meta(unet, diffusers_format_unet_checkpoint, dtype=torch_dtype)
|
||||
if unet._keys_to_ignore_on_load_unexpected is not None:
|
||||
for pat in unet._keys_to_ignore_on_load_unexpected:
|
||||
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
|
||||
@@ -1210,11 +1223,14 @@ def create_diffusers_unet_model_from_ldm(
|
||||
else:
|
||||
unet.load_state_dict(diffusers_format_unet_checkpoint)
|
||||
|
||||
if torch_dtype is not None:
|
||||
unet = unet.to(torch_dtype)
|
||||
|
||||
return {"unet": unet}
|
||||
|
||||
|
||||
def create_diffusers_vae_model_from_ldm(
|
||||
pipeline_class_name, original_config, checkpoint, image_size=None, scaling_factor=None
|
||||
pipeline_class_name, original_config, checkpoint, image_size=None, scaling_factor=None, torch_dtype=None
|
||||
):
|
||||
# import here to avoid circular imports
|
||||
from ..models import AutoencoderKL
|
||||
@@ -1231,7 +1247,7 @@ def create_diffusers_vae_model_from_ldm(
|
||||
if is_accelerate_available():
|
||||
from ..models.modeling_utils import load_model_dict_into_meta
|
||||
|
||||
unexpected_keys = load_model_dict_into_meta(vae, diffusers_format_vae_checkpoint)
|
||||
unexpected_keys = load_model_dict_into_meta(vae, diffusers_format_vae_checkpoint, dtype=torch_dtype)
|
||||
if vae._keys_to_ignore_on_load_unexpected is not None:
|
||||
for pat in vae._keys_to_ignore_on_load_unexpected:
|
||||
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
|
||||
@@ -1243,6 +1259,9 @@ def create_diffusers_vae_model_from_ldm(
|
||||
else:
|
||||
vae.load_state_dict(diffusers_format_vae_checkpoint)
|
||||
|
||||
if torch_dtype is not None:
|
||||
vae = vae.to(torch_dtype)
|
||||
|
||||
return {"vae": vae}
|
||||
|
||||
|
||||
@@ -1251,6 +1270,7 @@ def create_text_encoders_and_tokenizers_from_ldm(
|
||||
checkpoint,
|
||||
model_type=None,
|
||||
local_files_only=False,
|
||||
torch_dtype=None,
|
||||
):
|
||||
model_type = infer_model_type(original_config, model_type=model_type)
|
||||
|
||||
@@ -1260,7 +1280,7 @@ def create_text_encoders_and_tokenizers_from_ldm(
|
||||
|
||||
try:
|
||||
text_encoder = create_text_encoder_from_open_clip_checkpoint(
|
||||
config_name, checkpoint, local_files_only=local_files_only, **config_kwargs
|
||||
config_name, checkpoint, local_files_only=local_files_only, torch_dtype=torch_dtype, **config_kwargs
|
||||
)
|
||||
tokenizer = CLIPTokenizer.from_pretrained(
|
||||
config_name, subfolder="tokenizer", local_files_only=local_files_only
|
||||
@@ -1279,6 +1299,7 @@ def create_text_encoders_and_tokenizers_from_ldm(
|
||||
config_name,
|
||||
checkpoint,
|
||||
local_files_only=local_files_only,
|
||||
torch_dtype=torch_dtype,
|
||||
)
|
||||
tokenizer = CLIPTokenizer.from_pretrained(config_name, local_files_only=local_files_only)
|
||||
|
||||
@@ -1302,6 +1323,7 @@ def create_text_encoders_and_tokenizers_from_ldm(
|
||||
prefix=prefix,
|
||||
has_projection=True,
|
||||
local_files_only=local_files_only,
|
||||
torch_dtype=torch_dtype,
|
||||
**config_kwargs,
|
||||
)
|
||||
except Exception:
|
||||
@@ -1322,7 +1344,7 @@ def create_text_encoders_and_tokenizers_from_ldm(
|
||||
config_name = "openai/clip-vit-large-patch14"
|
||||
tokenizer = CLIPTokenizer.from_pretrained(config_name, local_files_only=local_files_only)
|
||||
text_encoder = create_text_encoder_from_ldm_clip_checkpoint(
|
||||
config_name, checkpoint, local_files_only=local_files_only
|
||||
config_name, checkpoint, local_files_only=local_files_only, torch_dtype=torch_dtype
|
||||
)
|
||||
|
||||
except Exception:
|
||||
@@ -1341,6 +1363,7 @@ def create_text_encoders_and_tokenizers_from_ldm(
|
||||
prefix=prefix,
|
||||
has_projection=True,
|
||||
local_files_only=local_files_only,
|
||||
torch_dtype=torch_dtype,
|
||||
**config_kwargs,
|
||||
)
|
||||
except Exception:
|
||||
|
||||
@@ -19,6 +19,7 @@ import torch
|
||||
import torch.nn.functional as F
|
||||
from torch import nn
|
||||
|
||||
from ..image_processor import IPAdapterMaskProcessor
|
||||
from ..utils import USE_PEFT_BACKEND, deprecate, logging
|
||||
from ..utils.import_utils import is_xformers_available
|
||||
from ..utils.torch_utils import maybe_allow_in_graph
|
||||
@@ -1809,24 +1810,7 @@ class SpatialNorm(nn.Module):
|
||||
return new_f
|
||||
|
||||
|
||||
## Deprecated
|
||||
class LoRAAttnProcessor(nn.Module):
|
||||
r"""
|
||||
Processor for implementing the LoRA attention mechanism.
|
||||
|
||||
Args:
|
||||
hidden_size (`int`, *optional*):
|
||||
The hidden size of the attention layer.
|
||||
cross_attention_dim (`int`, *optional*):
|
||||
The number of channels in the `encoder_hidden_states`.
|
||||
rank (`int`, defaults to 4):
|
||||
The dimension of the LoRA update matrices.
|
||||
network_alpha (`int`, *optional*):
|
||||
Equivalent to `alpha` but its usage is specific to Kohya (A1111) style LoRAs.
|
||||
kwargs (`dict`):
|
||||
Additional keyword arguments to pass to the `LoRALinearLayer` layers.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
hidden_size: int,
|
||||
@@ -1835,6 +1819,9 @@ class LoRAAttnProcessor(nn.Module):
|
||||
network_alpha: Optional[int] = None,
|
||||
**kwargs,
|
||||
):
|
||||
deprecation_message = "Using LoRAAttnProcessor is deprecated. Please use the PEFT backend for all things LoRA. You can install PEFT by running `pip install peft`."
|
||||
deprecate("LoRAAttnProcessor", "0.30.0", deprecation_message, standard_warn=False)
|
||||
|
||||
super().__init__()
|
||||
|
||||
self.hidden_size = hidden_size
|
||||
@@ -1883,23 +1870,6 @@ class LoRAAttnProcessor(nn.Module):
|
||||
|
||||
|
||||
class LoRAAttnProcessor2_0(nn.Module):
|
||||
r"""
|
||||
Processor for implementing the LoRA attention mechanism using PyTorch 2.0's memory-efficient scaled dot-product
|
||||
attention.
|
||||
|
||||
Args:
|
||||
hidden_size (`int`):
|
||||
The hidden size of the attention layer.
|
||||
cross_attention_dim (`int`, *optional*):
|
||||
The number of channels in the `encoder_hidden_states`.
|
||||
rank (`int`, defaults to 4):
|
||||
The dimension of the LoRA update matrices.
|
||||
network_alpha (`int`, *optional*):
|
||||
Equivalent to `alpha` but its usage is specific to Kohya (A1111) style LoRAs.
|
||||
kwargs (`dict`):
|
||||
Additional keyword arguments to pass to the `LoRALinearLayer` layers.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
hidden_size: int,
|
||||
@@ -1908,6 +1878,9 @@ class LoRAAttnProcessor2_0(nn.Module):
|
||||
network_alpha: Optional[int] = None,
|
||||
**kwargs,
|
||||
):
|
||||
deprecation_message = "Using LoRAAttnProcessor is deprecated. Please use the PEFT backend for all things LoRA. You can install PEFT by running `pip install peft`."
|
||||
deprecate("LoRAAttnProcessor2_0", "0.30.0", deprecation_message, standard_warn=False)
|
||||
|
||||
super().__init__()
|
||||
if not hasattr(F, "scaled_dot_product_attention"):
|
||||
raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
|
||||
@@ -2135,12 +2108,13 @@ class IPAdapterAttnProcessor(nn.Module):
|
||||
|
||||
def __call__(
|
||||
self,
|
||||
attn,
|
||||
hidden_states,
|
||||
encoder_hidden_states=None,
|
||||
attention_mask=None,
|
||||
temb=None,
|
||||
scale=1.0,
|
||||
attn: Attention,
|
||||
hidden_states: torch.FloatTensor,
|
||||
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
||||
attention_mask: Optional[torch.FloatTensor] = None,
|
||||
temb: Optional[torch.FloatTensor] = None,
|
||||
scale: float = 1.0,
|
||||
ip_adapter_masks: Optional[torch.FloatTensor] = None,
|
||||
):
|
||||
residual = hidden_states
|
||||
|
||||
@@ -2195,9 +2169,22 @@ class IPAdapterAttnProcessor(nn.Module):
|
||||
hidden_states = torch.bmm(attention_probs, value)
|
||||
hidden_states = attn.batch_to_head_dim(hidden_states)
|
||||
|
||||
if ip_adapter_masks is not None:
|
||||
if not isinstance(ip_adapter_masks, torch.Tensor) or ip_adapter_masks.ndim != 4:
|
||||
raise ValueError(
|
||||
" ip_adapter_mask should be a tensor with shape [num_ip_adapter, 1, height, width]."
|
||||
" Please use `IPAdapterMaskProcessor` to preprocess your mask"
|
||||
)
|
||||
if len(ip_adapter_masks) != len(self.scale):
|
||||
raise ValueError(
|
||||
f"Number of ip_adapter_masks ({len(ip_adapter_masks)}) must match number of IP-Adapters ({len(self.scale)})"
|
||||
)
|
||||
else:
|
||||
ip_adapter_masks = [None] * len(self.scale)
|
||||
|
||||
# for ip-adapter
|
||||
for current_ip_hidden_states, scale, to_k_ip, to_v_ip in zip(
|
||||
ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip
|
||||
for current_ip_hidden_states, scale, to_k_ip, to_v_ip, mask in zip(
|
||||
ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip, ip_adapter_masks
|
||||
):
|
||||
ip_key = to_k_ip(current_ip_hidden_states)
|
||||
ip_value = to_v_ip(current_ip_hidden_states)
|
||||
@@ -2209,6 +2196,15 @@ class IPAdapterAttnProcessor(nn.Module):
|
||||
current_ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
|
||||
current_ip_hidden_states = attn.batch_to_head_dim(current_ip_hidden_states)
|
||||
|
||||
if mask is not None:
|
||||
mask_downsample = IPAdapterMaskProcessor.downsample(
|
||||
mask, batch_size, current_ip_hidden_states.shape[1], current_ip_hidden_states.shape[2]
|
||||
)
|
||||
|
||||
mask_downsample = mask_downsample.to(dtype=query.dtype, device=query.device)
|
||||
|
||||
current_ip_hidden_states = current_ip_hidden_states * mask_downsample
|
||||
|
||||
hidden_states = hidden_states + scale * current_ip_hidden_states
|
||||
|
||||
# linear proj
|
||||
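For orientation, the mask path added above expects `ip_adapter_masks` to already be in the `[num_ip_adapter, 1, height, width]` layout it validates. A minimal, hedged usage sketch follows; the `preprocess` call and the `cross_attention_kwargs` plumbing are assumptions based on this diff, and the mask images, pipeline object, and prompt are illustrative:

# Hedged sketch: prepare masks in the [num_ip_adapter, 1, height, width] layout
# validated by IPAdapterAttnProcessor above, then pass them to a pipeline call.
from diffusers.image_processor import IPAdapterMaskProcessor

mask_processor = IPAdapterMaskProcessor()
# `subject_mask` and `background_mask` are user-supplied PIL images (assumed).
ip_adapter_masks = mask_processor.preprocess(
    [subject_mask, background_mask], height=768, width=512
)

# Assumed plumbing: one mask per loaded IP-Adapter (i.e. per entry in self.scale),
# forwarded to the attention processor through cross_attention_kwargs.
image = pipeline(
    prompt="two subjects in a park",
    ip_adapter_image=[subject_image, background_image],
    cross_attention_kwargs={"ip_adapter_masks": ip_adapter_masks},
).images[0]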
@@ -2272,12 +2268,13 @@ class IPAdapterAttnProcessor2_0(torch.nn.Module):
|
||||
|
||||
def __call__(
|
||||
self,
|
||||
attn,
|
||||
hidden_states,
|
||||
encoder_hidden_states=None,
|
||||
attention_mask=None,
|
||||
temb=None,
|
||||
scale=1.0,
|
||||
attn: Attention,
|
||||
hidden_states: torch.FloatTensor,
|
||||
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
||||
attention_mask: Optional[torch.FloatTensor] = None,
|
||||
temb: Optional[torch.FloatTensor] = None,
|
||||
scale: float = 1.0,
|
||||
ip_adapter_masks: Optional[torch.FloatTensor] = None,
|
||||
):
|
||||
residual = hidden_states
|
||||
|
||||
@@ -2346,9 +2343,22 @@ class IPAdapterAttnProcessor2_0(torch.nn.Module):
|
||||
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
|
||||
hidden_states = hidden_states.to(query.dtype)
|
||||
|
||||
if ip_adapter_masks is not None:
|
||||
if not isinstance(ip_adapter_masks, torch.Tensor) or ip_adapter_masks.ndim != 4:
|
||||
raise ValueError(
|
||||
" ip_adapter_mask should be a tensor with shape [num_ip_adapter, 1, height, width]."
|
||||
" Please use `IPAdapterMaskProcessor` to preprocess your mask"
|
||||
)
|
||||
if len(ip_adapter_masks) != len(self.scale):
|
||||
raise ValueError(
|
||||
f"Number of ip_adapter_masks ({len(ip_adapter_masks)}) must match number of IP-Adapters ({len(self.scale)})"
|
||||
)
|
||||
else:
|
||||
ip_adapter_masks = [None] * len(self.scale)
|
||||
|
||||
# for ip-adapter
|
||||
for current_ip_hidden_states, scale, to_k_ip, to_v_ip in zip(
|
||||
ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip
|
||||
for current_ip_hidden_states, scale, to_k_ip, to_v_ip, mask in zip(
|
||||
ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip, ip_adapter_masks
|
||||
):
|
||||
ip_key = to_k_ip(current_ip_hidden_states)
|
||||
ip_value = to_v_ip(current_ip_hidden_states)
|
||||
@@ -2367,6 +2377,15 @@ class IPAdapterAttnProcessor2_0(torch.nn.Module):
|
||||
)
|
||||
current_ip_hidden_states = current_ip_hidden_states.to(query.dtype)
|
||||
|
||||
if mask is not None:
|
||||
mask_downsample = IPAdapterMaskProcessor.downsample(
|
||||
mask, batch_size, current_ip_hidden_states.shape[1], current_ip_hidden_states.shape[2]
|
||||
)
|
||||
|
||||
mask_downsample = mask_downsample.to(dtype=query.dtype, device=query.device)
|
||||
|
||||
current_ip_hidden_states = current_ip_hidden_states * mask_downsample
|
||||
|
||||
hidden_states = hidden_states + scale * current_ip_hidden_states
|
||||
|
||||
# linear proj
|
||||
|
||||
@@ -55,6 +55,42 @@ from .unet_2d_blocks import (
|
||||
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
||||
|
||||
|
||||
def _check_to_assign_to_num_attention_heads(attention_head_dim, down_block_types, mid_block_type, up_block_types):
|
||||
# If a new config is passed with the correct value for `num_attention_heads` and `attention_head_dim` is None,
|
||||
# we do not need to reassign anything
|
||||
if attention_head_dim is None:
|
||||
return False
|
||||
|
||||
elif (
|
||||
"CrossAttnDownBlock2D" in down_block_types
|
||||
or "CrossAttnDownBlock2D" in up_block_types
|
||||
or mid_block_type == "UNetMidBlock2DCrossAttn"
|
||||
):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def _set_attention_parameters(num_attention_heads, attention_head_dim, block_out_channels):
|
||||
if num_attention_heads is None:
|
||||
if isinstance(attention_head_dim, int):
|
||||
attention_head_dim = (attention_head_dim,) * len(block_out_channels)
|
||||
|
||||
num_attention_heads = [
|
||||
out_channel // attention_head_dim[i] for i, out_channel in enumerate(block_out_channels)
|
||||
]
|
||||
|
||||
elif attention_head_dim is None:
|
||||
if isinstance(num_attention_heads, int):
|
||||
num_attention_heads = (num_attention_heads,) * len(block_out_channels)
|
||||
|
||||
attention_head_dim = [
|
||||
out_channel // num_attention_heads[i] for i, out_channel in enumerate(block_out_channels)
|
||||
]
|
||||
|
||||
return num_attention_heads, attention_head_dim
|
||||
|
||||
|
||||
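To make the interaction of the two helpers above concrete, here is a minimal sketch, assuming the standard Stable Diffusion 1.5 values (`block_out_channels=(320, 640, 1280, 1280)` and the legacy `attention_head_dim=8`, which actually stored the number of heads); the comments refer to the helper functions added above:

# Minimal sketch (not part of the diff): what the two helpers above compute for a
# Stable Diffusion 1.5-style config.
block_out_channels = (320, 640, 1280, 1280)
attention_head_dim = 8          # legacy config value, really the number of heads
num_attention_heads = None

# _check_to_assign_to_num_attention_heads: cross-attention blocks are present,
# so the legacy value is reassigned to num_attention_heads.
num_attention_heads, attention_head_dim = attention_head_dim, None

# _set_attention_parameters: expand to per-block tuples and derive the missing one.
num_attention_heads = (num_attention_heads,) * len(block_out_channels)
attention_head_dim = [c // n for c, n in zip(block_out_channels, num_attention_heads)]

print(num_attention_heads)  # (8, 8, 8, 8)
print(attention_head_dim)   # [40, 80, 160, 160]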
@dataclass
|
||||
class UNet2DConditionOutput(BaseOutput):
|
||||
"""
|
||||
@@ -225,18 +261,22 @@ class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin,
|
||||
|
||||
self.sample_size = sample_size
|
||||
|
||||
if num_attention_heads is not None:
|
||||
if (num_attention_heads is not None) and (attention_head_dim is not None):
|
||||
raise ValueError(
|
||||
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
|
||||
"It is not possible to configure the UNet with both `num_attention_heads` and `attention_head_dim` at the same time. Please set only one of these values."
|
||||
)
|
||||
|
||||
# If `num_attention_heads` is not defined (which is the case for most models)
|
||||
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
|
||||
# The reason for this behavior is to correct for incorrectly named variables that were introduced
|
||||
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
|
||||
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
|
||||
# which is why we correct for the naming here.
|
||||
num_attention_heads = num_attention_heads or attention_head_dim
|
||||
_should_assign_num_attention_heads = _check_to_assign_to_num_attention_heads(
|
||||
attention_head_dim, down_block_types, mid_block_type, up_block_types
|
||||
)
|
||||
if _should_assign_num_attention_heads:
|
||||
num_attention_heads = attention_head_dim
|
||||
attention_head_dim = None
|
||||
logger.warning(
"`attention_head_dim` has been incorrectly configured for this model and will be reassigned to `num_attention_heads`. "
"Further reference: https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131"
)
|
||||
self.register_to_config(num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim)
|
||||
|
||||
# Check inputs
|
||||
if len(down_block_types) != len(up_block_types):
|
||||
@@ -254,16 +294,6 @@ class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin,
|
||||
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
|
||||
)
|
||||
|
||||
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
|
||||
raise ValueError(
|
||||
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
|
||||
)
|
||||
|
||||
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
|
||||
raise ValueError(
|
||||
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
|
||||
)
|
||||
|
||||
if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
|
||||
raise ValueError(
|
||||
f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
|
||||
@@ -410,6 +440,11 @@ class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin,
|
||||
self.down_blocks = nn.ModuleList([])
|
||||
self.up_blocks = nn.ModuleList([])
|
||||
|
||||
# Compute both attention_head_dim and num_attention_heads for each block
|
||||
num_attention_heads, attention_head_dim = _set_attention_parameters(
|
||||
num_attention_heads, attention_head_dim, block_out_channels
|
||||
)
|
||||
|
||||
if isinstance(only_cross_attention, bool):
|
||||
if mid_block_only_cross_attention is None:
|
||||
mid_block_only_cross_attention = only_cross_attention
|
||||
@@ -419,12 +454,6 @@ class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin,
|
||||
if mid_block_only_cross_attention is None:
|
||||
mid_block_only_cross_attention = False
|
||||
|
||||
if isinstance(num_attention_heads, int):
|
||||
num_attention_heads = (num_attention_heads,) * len(down_block_types)
|
||||
|
||||
if isinstance(attention_head_dim, int):
|
||||
attention_head_dim = (attention_head_dim,) * len(down_block_types)
|
||||
|
||||
if isinstance(cross_attention_dim, int):
|
||||
cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
|
||||
|
||||
|
||||
@@ -54,7 +54,7 @@ class UNet3DConditionOutput(BaseOutput):
|
||||
The output of [`UNet3DConditionModel`].
|
||||
|
||||
Args:
|
||||
sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
|
||||
sample (`torch.FloatTensor` of shape `(batch_size, num_channels, num_frames, height, width)`):
|
||||
The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
|
||||
"""
|
||||
|
||||
@@ -74,9 +74,9 @@ class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin)
|
||||
Height and width of input/output sample.
|
||||
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
|
||||
out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
|
||||
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
|
||||
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D")`):
|
||||
The tuple of downsample blocks to use.
|
||||
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
|
||||
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D")`):
|
||||
The tuple of upsample blocks to use.
|
||||
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
|
||||
The tuple of output channels for each block.
|
||||
@@ -87,8 +87,8 @@ class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin)
|
||||
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
|
||||
If `None`, normalization and activation layers are skipped in post-processing.
|
||||
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
|
||||
cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.
|
||||
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
|
||||
cross_attention_dim (`int`, *optional*, defaults to 1024): The dimension of the cross attention features.
|
||||
attention_head_dim (`int`, *optional*, defaults to 64): The dimension of the attention heads.
|
||||
num_attention_heads (`int`, *optional*): The number of attention heads.
|
||||
"""
|
||||
|
||||
@@ -533,7 +533,7 @@ class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin)
|
||||
|
||||
Args:
|
||||
sample (`torch.FloatTensor`):
|
||||
The noisy input tensor with the following shape `(batch, num_frames, channel, height, width`.
|
||||
The noisy input tensor with the following shape `(batch, num_channels, num_frames, height, width)`.
|
||||
timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
|
||||
encoder_hidden_states (`torch.FloatTensor`):
|
||||
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
|
||||
|
||||
@@ -13,12 +13,10 @@
|
||||
# limitations under the License.
|
||||
|
||||
import inspect
|
||||
import math
|
||||
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
||||
from typing import Any, Callable, Dict, List, Optional, Union
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.fft as fft
|
||||
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
|
||||
|
||||
from ...image_processor import PipelineImageInput, VaeImageProcessor
|
||||
@@ -43,6 +41,7 @@ from ...utils import (
|
||||
unscale_lora_layers,
|
||||
)
|
||||
from ...utils.torch_utils import randn_tensor
|
||||
from ..free_init_utils import FreeInitMixin
|
||||
from ..pipeline_utils import DiffusionPipeline
|
||||
from .pipeline_output import AnimateDiffPipelineOutput
|
||||
|
||||
@@ -87,72 +86,9 @@ def tensor2vid(video: torch.Tensor, processor: "VaeImageProcessor", output_type:
|
||||
return outputs
|
||||
|
||||
|
||||
def _get_freeinit_freq_filter(
|
||||
shape: Tuple[int, ...],
|
||||
device: Union[str, torch.dtype],
|
||||
filter_type: str,
|
||||
order: float,
|
||||
spatial_stop_frequency: float,
|
||||
temporal_stop_frequency: float,
|
||||
) -> torch.Tensor:
|
||||
r"""Returns the FreeInit filter based on filter type and other input conditions."""
|
||||
|
||||
T, H, W = shape[-3], shape[-2], shape[-1]
|
||||
mask = torch.zeros(shape)
|
||||
|
||||
if spatial_stop_frequency == 0 or temporal_stop_frequency == 0:
|
||||
return mask
|
||||
|
||||
if filter_type == "butterworth":
|
||||
|
||||
def retrieve_mask(x):
|
||||
return 1 / (1 + (x / spatial_stop_frequency**2) ** order)
|
||||
elif filter_type == "gaussian":
|
||||
|
||||
def retrieve_mask(x):
|
||||
return math.exp(-1 / (2 * spatial_stop_frequency**2) * x)
|
||||
elif filter_type == "ideal":
|
||||
|
||||
def retrieve_mask(x):
|
||||
return 1 if x <= spatial_stop_frequency * 2 else 0
|
||||
else:
|
||||
raise NotImplementedError("`filter_type` must be one of gaussian, butterworth or ideal")
|
||||
|
||||
for t in range(T):
|
||||
for h in range(H):
|
||||
for w in range(W):
|
||||
d_square = (
|
||||
((spatial_stop_frequency / temporal_stop_frequency) * (2 * t / T - 1)) ** 2
|
||||
+ (2 * h / H - 1) ** 2
|
||||
+ (2 * w / W - 1) ** 2
|
||||
)
|
||||
mask[..., t, h, w] = retrieve_mask(d_square)
|
||||
|
||||
return mask.to(device)
|
||||
|
||||
|
||||
def _freq_mix_3d(x: torch.Tensor, noise: torch.Tensor, LPF: torch.Tensor) -> torch.Tensor:
|
||||
r"""Noise reinitialization."""
|
||||
# FFT
|
||||
x_freq = fft.fftn(x, dim=(-3, -2, -1))
|
||||
x_freq = fft.fftshift(x_freq, dim=(-3, -2, -1))
|
||||
noise_freq = fft.fftn(noise, dim=(-3, -2, -1))
|
||||
noise_freq = fft.fftshift(noise_freq, dim=(-3, -2, -1))
|
||||
|
||||
# frequency mix
|
||||
HPF = 1 - LPF
|
||||
x_freq_low = x_freq * LPF
|
||||
noise_freq_high = noise_freq * HPF
|
||||
x_freq_mixed = x_freq_low + noise_freq_high # mix in freq domain
|
||||
|
||||
# IFFT
|
||||
x_freq_mixed = fft.ifftshift(x_freq_mixed, dim=(-3, -2, -1))
|
||||
x_mixed = fft.ifftn(x_freq_mixed, dim=(-3, -2, -1)).real
|
||||
|
||||
return x_mixed
|
||||
|
||||
|
||||
class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin):
|
||||
class AnimateDiffPipeline(
|
||||
DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin, FreeInitMixin
|
||||
):
|
||||
r"""
|
||||
Pipeline for text-to-video generation.
|
||||
|
||||
@@ -182,7 +118,7 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
||||
"""
|
||||
|
||||
model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
|
||||
_optional_components = ["feature_extractor", "image_encoder"]
|
||||
_optional_components = ["feature_extractor", "image_encoder", "motion_adapter"]
|
||||
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
|
||||
|
||||
def __init__(
|
||||
@@ -204,7 +140,8 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
||||
image_encoder: CLIPVisionModelWithProjection = None,
|
||||
):
|
||||
super().__init__()
|
||||
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
|
||||
if isinstance(unet, UNet2DConditionModel):
|
||||
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
|
||||
|
||||
self.register_modules(
|
||||
vae=vae,
|
||||
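The `isinstance` guard above lets the constructor accept either a plain `UNet2DConditionModel` (converted on the fly with a motion adapter) or a `UNetMotionModel` that was already assembled. A hedged sketch of the two construction paths this enables; the checkpoint identifiers are illustrative assumptions, not taken from this diff:

# Sketch of the two paths enabled by the isinstance() guard above.
from diffusers import AnimateDiffPipeline, MotionAdapter, UNet2DConditionModel, UNetMotionModel

adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
unet2d = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")

# Path 1: pass a 2D UNet; the constructor converts it via UNetMotionModel.from_unet2d().
pipe_a = AnimateDiffPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", unet=unet2d, motion_adapter=adapter
)

# Path 2: pass an already-converted UNetMotionModel; the guard skips the conversion.
motion_unet = UNetMotionModel.from_unet2d(unet2d, adapter)
pipe_b = AnimateDiffPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", unet=motion_unet, motion_adapter=adapter
)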
@@ -530,63 +467,10 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
||||
raise ValueError("The pipeline must have `unet` for using FreeU.")
|
||||
self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
|
||||
|
||||
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
|
||||
def disable_freeu(self):
|
||||
"""Disables the FreeU mechanism if enabled."""
|
||||
self.unet.disable_freeu()
|
||||
|
||||
@property
|
||||
def free_init_enabled(self):
|
||||
return hasattr(self, "_free_init_num_iters") and self._free_init_num_iters is not None
|
||||
|
||||
def enable_free_init(
|
||||
self,
|
||||
num_iters: int = 3,
|
||||
use_fast_sampling: bool = False,
|
||||
method: str = "butterworth",
|
||||
order: int = 4,
|
||||
spatial_stop_frequency: float = 0.25,
|
||||
temporal_stop_frequency: float = 0.25,
|
||||
generator: torch.Generator = None,
|
||||
):
|
||||
"""Enables the FreeInit mechanism as in https://arxiv.org/abs/2312.07537.
|
||||
|
||||
This implementation has been adapted from the [official repository](https://github.com/TianxingWu/FreeInit).
|
||||
|
||||
Args:
|
||||
num_iters (`int`, *optional*, defaults to `3`):
|
||||
Number of FreeInit noise re-initialization iterations.
|
||||
use_fast_sampling (`bool`, *optional*, defaults to `False`):
|
||||
Whether or not to speedup sampling procedure at the cost of probably lower quality results. Enables
|
||||
the "Coarse-to-Fine Sampling" strategy, as mentioned in the paper, if set to `True`.
|
||||
method (`str`, *optional*, defaults to `butterworth`):
|
||||
Must be one of `butterworth`, `ideal` or `gaussian` to use as the filtering method for the
|
||||
FreeInit low pass filter.
|
||||
order (`int`, *optional*, defaults to `4`):
|
||||
Order of the filter used in `butterworth` method. Larger values lead to `ideal` method behaviour
|
||||
whereas lower values lead to `gaussian` method behaviour.
|
||||
spatial_stop_frequency (`float`, *optional*, defaults to `0.25`):
|
||||
Normalized stop frequency for spatial dimensions. Must be between 0 to 1. Referred to as `d_s` in
|
||||
the original implementation.
|
||||
temporal_stop_frequency (`float`, *optional*, defaults to `0.25`):
|
||||
Normalized stop frequency for temporal dimensions. Must be between 0 to 1. Referred to as `d_t` in
|
||||
the original implementation.
|
||||
generator (`torch.Generator`, *optional*, defaults to `0.25`):
|
||||
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
||||
FreeInit generation deterministic.
|
||||
"""
|
||||
self._free_init_num_iters = num_iters
|
||||
self._free_init_use_fast_sampling = use_fast_sampling
|
||||
self._free_init_method = method
|
||||
self._free_init_order = order
|
||||
self._free_init_spatial_stop_frequency = spatial_stop_frequency
|
||||
self._free_init_temporal_stop_frequency = temporal_stop_frequency
|
||||
self._free_init_generator = generator
|
||||
|
||||
def disable_free_init(self):
|
||||
"""Disables the FreeInit mechanism if enabled."""
|
||||
self._free_init_num_iters = None
|
||||
|
||||
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
||||
def prepare_extra_step_kwargs(self, generator, eta):
|
||||
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
||||
@@ -691,158 +575,6 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
||||
latents = latents * self.scheduler.init_noise_sigma
|
||||
return latents
|
||||
|
||||
def _denoise_loop(
|
||||
self,
|
||||
timesteps,
|
||||
num_inference_steps,
|
||||
do_classifier_free_guidance,
|
||||
guidance_scale,
|
||||
num_warmup_steps,
|
||||
prompt_embeds,
|
||||
negative_prompt_embeds,
|
||||
latents,
|
||||
cross_attention_kwargs,
|
||||
added_cond_kwargs,
|
||||
extra_step_kwargs,
|
||||
callback,
|
||||
callback_steps,
|
||||
callback_on_step_end,
|
||||
callback_on_step_end_tensor_inputs,
|
||||
):
|
||||
"""Denoising loop for AnimateDiff."""
|
||||
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
||||
for i, t in enumerate(timesteps):
|
||||
# expand the latents if we are doing classifier free guidance
|
||||
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
||||
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
||||
|
||||
# predict the noise residual
|
||||
noise_pred = self.unet(
|
||||
latent_model_input,
|
||||
t,
|
||||
encoder_hidden_states=prompt_embeds,
|
||||
cross_attention_kwargs=cross_attention_kwargs,
|
||||
added_cond_kwargs=added_cond_kwargs,
|
||||
).sample
|
||||
|
||||
# perform guidance
|
||||
if do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
||||
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
||||
|
||||
# compute the previous noisy sample x_t -> x_t-1
|
||||
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
||||
|
||||
if callback_on_step_end is not None:
|
||||
callback_kwargs = {}
|
||||
for k in callback_on_step_end_tensor_inputs:
|
||||
callback_kwargs[k] = locals()[k]
|
||||
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
||||
|
||||
latents = callback_outputs.pop("latents", latents)
|
||||
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
||||
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
||||
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
if callback is not None and i % callback_steps == 0:
|
||||
callback(i, t, latents)
|
||||
|
||||
return latents
|
||||
|
||||
def _free_init_loop(
|
||||
self,
|
||||
height,
|
||||
width,
|
||||
num_frames,
|
||||
num_channels_latents,
|
||||
batch_size,
|
||||
num_videos_per_prompt,
|
||||
denoise_args,
|
||||
device,
|
||||
):
|
||||
"""Denoising loop for AnimateDiff using FreeInit noise reinitialization technique."""
|
||||
|
||||
latents = denoise_args.get("latents")
|
||||
prompt_embeds = denoise_args.get("prompt_embeds")
|
||||
timesteps = denoise_args.get("timesteps")
|
||||
num_inference_steps = denoise_args.get("num_inference_steps")
|
||||
|
||||
latent_shape = (
|
||||
batch_size * num_videos_per_prompt,
|
||||
num_channels_latents,
|
||||
num_frames,
|
||||
height // self.vae_scale_factor,
|
||||
width // self.vae_scale_factor,
|
||||
)
|
||||
free_init_filter_shape = (
|
||||
1,
|
||||
num_channels_latents,
|
||||
num_frames,
|
||||
height // self.vae_scale_factor,
|
||||
width // self.vae_scale_factor,
|
||||
)
|
||||
free_init_freq_filter = _get_freeinit_freq_filter(
|
||||
shape=free_init_filter_shape,
|
||||
device=device,
|
||||
filter_type=self._free_init_method,
|
||||
order=self._free_init_order,
|
||||
spatial_stop_frequency=self._free_init_spatial_stop_frequency,
|
||||
temporal_stop_frequency=self._free_init_temporal_stop_frequency,
|
||||
)
|
||||
|
||||
with self.progress_bar(total=self._free_init_num_iters) as free_init_progress_bar:
|
||||
for i in range(self._free_init_num_iters):
|
||||
# For the first FreeInit iteration, the original latent is used without modification.
|
||||
# Subsequent iterations apply the noise reinitialization technique.
|
||||
if i == 0:
|
||||
initial_noise = latents.detach().clone()
|
||||
else:
|
||||
current_diffuse_timestep = (
|
||||
self.scheduler.config.num_train_timesteps - 1
|
||||
) # diffuse to t=999 noise level
|
||||
diffuse_timesteps = torch.full((batch_size,), current_diffuse_timestep).long()
|
||||
z_T = self.scheduler.add_noise(
|
||||
original_samples=latents, noise=initial_noise, timesteps=diffuse_timesteps.to(device)
|
||||
).to(dtype=torch.float32)
|
||||
z_rand = randn_tensor(
|
||||
shape=latent_shape,
|
||||
generator=self._free_init_generator,
|
||||
device=device,
|
||||
dtype=torch.float32,
|
||||
)
|
||||
latents = _freq_mix_3d(z_T, z_rand, LPF=free_init_freq_filter)
|
||||
latents = latents.to(prompt_embeds.dtype)
|
||||
|
||||
# Coarse-to-Fine Sampling for faster inference (can lead to lower quality)
|
||||
if self._free_init_use_fast_sampling:
|
||||
current_num_inference_steps = int(num_inference_steps / self._free_init_num_iters * (i + 1))
|
||||
self.scheduler.set_timesteps(current_num_inference_steps, device=device)
|
||||
timesteps = self.scheduler.timesteps
|
||||
denoise_args.update({"timesteps": timesteps, "num_inference_steps": current_num_inference_steps})
|
||||
|
||||
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
||||
denoise_args.update({"latents": latents, "num_warmup_steps": num_warmup_steps})
|
||||
latents = self._denoise_loop(**denoise_args)
|
||||
|
||||
free_init_progress_bar.update()
|
||||
|
||||
return latents
|
||||
|
||||
def _retrieve_video_frames(self, latents, output_type, return_dict):
|
||||
"""Helper function to handle latents to output conversion."""
|
||||
if output_type == "latent":
|
||||
return AnimateDiffPipelineOutput(frames=latents)
|
||||
|
||||
video_tensor = self.decode_latents(latents)
|
||||
video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
|
||||
|
||||
if not return_dict:
|
||||
return (video,)
|
||||
|
||||
return AnimateDiffPipelineOutput(frames=video)
|
||||
|
||||
@property
|
||||
def guidance_scale(self):
|
||||
return self._guidance_scale
|
||||
@@ -1046,7 +778,6 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
||||
# 4. Prepare timesteps
|
||||
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
||||
timesteps = self.scheduler.timesteps
|
||||
self._num_timesteps = len(timesteps)
|
||||
|
||||
# 5. Prepare latent variables
|
||||
num_channels_latents = self.unet.config.in_channels
|
||||
@@ -1068,43 +799,64 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
||||
# 7. Add image embeds for IP-Adapter
|
||||
added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
|
||||
|
||||
# 8. Denoising loop
|
||||
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
||||
denoise_args = {
|
||||
"timesteps": timesteps,
|
||||
"num_inference_steps": num_inference_steps,
|
||||
"do_classifier_free_guidance": self.do_classifier_free_guidance,
|
||||
"guidance_scale": guidance_scale,
|
||||
"num_warmup_steps": num_warmup_steps,
|
||||
"prompt_embeds": prompt_embeds,
|
||||
"negative_prompt_embeds": negative_prompt_embeds,
|
||||
"latents": latents,
|
||||
"cross_attention_kwargs": self.cross_attention_kwargs,
|
||||
"added_cond_kwargs": added_cond_kwargs,
|
||||
"extra_step_kwargs": extra_step_kwargs,
|
||||
"callback": callback,
|
||||
"callback_steps": callback_steps,
|
||||
"callback_on_step_end": callback_on_step_end,
|
||||
"callback_on_step_end_tensor_inputs": callback_on_step_end_tensor_inputs,
|
||||
}
|
||||
num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1
|
||||
for free_init_iter in range(num_free_init_iters):
|
||||
if self.free_init_enabled:
|
||||
latents, timesteps = self._apply_free_init(
|
||||
latents, free_init_iter, num_inference_steps, device, latents.dtype, generator
|
||||
)
|
||||
|
||||
if self.free_init_enabled:
|
||||
latents = self._free_init_loop(
|
||||
height=height,
|
||||
width=width,
|
||||
num_frames=num_frames,
|
||||
num_channels_latents=num_channels_latents,
|
||||
batch_size=batch_size,
|
||||
num_videos_per_prompt=num_videos_per_prompt,
|
||||
denoise_args=denoise_args,
|
||||
device=device,
|
||||
)
|
||||
else:
|
||||
latents = self._denoise_loop(**denoise_args)
|
||||
self._num_timesteps = len(timesteps)
|
||||
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
||||
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
||||
for i, t in enumerate(timesteps):
|
||||
# expand the latents if we are doing classifier free guidance
|
||||
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
||||
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
||||
|
||||
video = self._retrieve_video_frames(latents, output_type, return_dict)
|
||||
# predict the noise residual
|
||||
noise_pred = self.unet(
|
||||
latent_model_input,
|
||||
t,
|
||||
encoder_hidden_states=prompt_embeds,
|
||||
cross_attention_kwargs=cross_attention_kwargs,
|
||||
added_cond_kwargs=added_cond_kwargs,
|
||||
).sample
|
||||
|
||||
# perform guidance
|
||||
if self.do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
||||
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
||||
|
||||
# compute the previous noisy sample x_t -> x_t-1
|
||||
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
||||
|
||||
if callback_on_step_end is not None:
|
||||
callback_kwargs = {}
|
||||
for k in callback_on_step_end_tensor_inputs:
|
||||
callback_kwargs[k] = locals()[k]
|
||||
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
||||
|
||||
latents = callback_outputs.pop("latents", latents)
|
||||
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
||||
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
||||
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
if callback is not None and i % callback_steps == 0:
|
||||
callback(i, t, latents)
|
||||
|
||||
if output_type == "latent":
|
||||
return AnimateDiffPipelineOutput(frames=latents)
|
||||
|
||||
video_tensor = self.decode_latents(latents)
|
||||
video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
|
||||
|
||||
# 9. Offload all models
|
||||
self.maybe_free_model_hooks()
|
||||
|
||||
return video
|
||||
if not return_dict:
|
||||
return (video,)
|
||||
|
||||
return AnimateDiffPipelineOutput(frames=video)
|
||||
|
||||
@@ -34,6 +34,7 @@ from ...schedulers import (
|
||||
)
|
||||
from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
|
||||
from ...utils.torch_utils import randn_tensor
|
||||
from ..free_init_utils import FreeInitMixin
|
||||
from ..pipeline_utils import DiffusionPipeline
|
||||
from .pipeline_output import AnimateDiffPipelineOutput
|
||||
|
||||
@@ -163,7 +164,9 @@ def retrieve_timesteps(
|
||||
return timesteps, num_inference_steps
|
||||
|
||||
|
||||
class AnimateDiffVideoToVideoPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin):
|
||||
class AnimateDiffVideoToVideoPipeline(
|
||||
DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin, FreeInitMixin
|
||||
):
|
||||
r"""
|
||||
Pipeline for video-to-video generation.
|
||||
|
||||
@@ -193,7 +196,7 @@ class AnimateDiffVideoToVideoPipeline(DiffusionPipeline, TextualInversionLoaderM
|
||||
"""
|
||||
|
||||
model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
|
||||
_optional_components = ["feature_extractor", "image_encoder"]
|
||||
_optional_components = ["feature_extractor", "image_encoder", "motion_adapter"]
|
||||
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
|
||||
|
||||
def __init__(
|
||||
@@ -215,7 +218,8 @@ class AnimateDiffVideoToVideoPipeline(DiffusionPipeline, TextualInversionLoaderM
|
||||
image_encoder: CLIPVisionModelWithProjection = None,
|
||||
):
|
||||
super().__init__()
|
||||
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
|
||||
if isinstance(unet, UNet2DConditionModel):
|
||||
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
|
||||
|
||||
self.register_modules(
|
||||
vae=vae,
|
||||
@@ -584,12 +588,12 @@ class AnimateDiffVideoToVideoPipeline(DiffusionPipeline, TextualInversionLoaderM
|
||||
if video is not None and latents is not None:
|
||||
raise ValueError("Only one of `video` or `latents` should be provided")
|
||||
|
||||
def get_timesteps(self, num_inference_steps, strength, device):
|
||||
def get_timesteps(self, num_inference_steps, timesteps, strength, device):
|
||||
# get the original timestep using init_timestep
|
||||
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
||||
|
||||
t_start = max(num_inference_steps - init_timestep, 0)
|
||||
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
|
||||
timesteps = timesteps[t_start * self.scheduler.order :]
|
||||
|
||||
return timesteps, num_inference_steps - t_start
|
||||
|
||||
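The strength-based slicing in the updated `get_timesteps` above is plain index arithmetic, so it can be checked without a real scheduler. A minimal sketch, assuming a first-order scheduler (`scheduler.order == 1`) and 50 inference steps:

# Stand-in for the logic in get_timesteps() above (assumes scheduler.order == 1).
num_inference_steps = 50
strength = 0.6
timesteps = list(range(999, -1, -20))  # stand-in for the scheduler's 50 timesteps

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 30
t_start = max(num_inference_steps - init_timestep, 0)                          # 20
timesteps = timesteps[t_start:]  # drop the 20 noisiest steps, keep the last 30

print(len(timesteps))  # 30 == num_inference_steps - t_start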
@@ -876,9 +880,8 @@ class AnimateDiffVideoToVideoPipeline(DiffusionPipeline, TextualInversionLoaderM
|
||||
|
||||
# 4. Prepare timesteps
|
||||
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
|
||||
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
|
||||
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device)
|
||||
latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt)
|
||||
self._num_timesteps = len(timesteps)
|
||||
|
||||
# 5. Prepare latent variables
|
||||
num_channels_latents = self.unet.config.in_channels
|
||||
@@ -901,42 +904,55 @@ class AnimateDiffVideoToVideoPipeline(DiffusionPipeline, TextualInversionLoaderM
|
||||
# 7. Add image embeds for IP-Adapter
|
||||
added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
|
||||
|
||||
# 8. Denoising loop
|
||||
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
||||
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
||||
for i, t in enumerate(timesteps):
|
||||
# expand the latents if we are doing classifier free guidance
|
||||
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
||||
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
||||
num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1
|
||||
for free_init_iter in range(num_free_init_iters):
|
||||
if self.free_init_enabled:
|
||||
latents, timesteps = self._apply_free_init(
|
||||
latents, free_init_iter, num_inference_steps, device, latents.dtype, generator
|
||||
)
|
||||
num_inference_steps = len(timesteps)
|
||||
# make sure to readjust timesteps based on strength
|
||||
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device)
|
||||
|
||||
# predict the noise residual
|
||||
noise_pred = self.unet(
|
||||
latent_model_input,
|
||||
t,
|
||||
encoder_hidden_states=prompt_embeds,
|
||||
cross_attention_kwargs=self.cross_attention_kwargs,
|
||||
added_cond_kwargs=added_cond_kwargs,
|
||||
).sample
|
||||
self._num_timesteps = len(timesteps)
|
||||
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
||||
# 8. Denoising loop
|
||||
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
||||
for i, t in enumerate(timesteps):
|
||||
# expand the latents if we are doing classifier free guidance
|
||||
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
||||
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
||||
|
||||
# perform guidance
|
||||
if self.do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
||||
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
||||
# predict the noise residual
|
||||
noise_pred = self.unet(
|
||||
latent_model_input,
|
||||
t,
|
||||
encoder_hidden_states=prompt_embeds,
|
||||
cross_attention_kwargs=self.cross_attention_kwargs,
|
||||
added_cond_kwargs=added_cond_kwargs,
|
||||
).sample
|
||||
|
||||
# compute the previous noisy sample x_t -> x_t-1
|
||||
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
||||
# perform guidance
|
||||
if self.do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
||||
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
||||
|
||||
if callback_on_step_end is not None:
|
||||
callback_kwargs = {}
|
||||
for k in callback_on_step_end_tensor_inputs:
|
||||
callback_kwargs[k] = locals()[k]
|
||||
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
||||
# compute the previous noisy sample x_t -> x_t-1
|
||||
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
||||
|
||||
latents = callback_outputs.pop("latents", latents)
|
||||
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
||||
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
||||
if callback_on_step_end is not None:
|
||||
callback_kwargs = {}
|
||||
for k in callback_on_step_end_tensor_inputs:
|
||||
callback_kwargs[k] = locals()[k]
|
||||
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
||||
|
||||
progress_bar.update()
|
||||
latents = callback_outputs.pop("latents", latents)
|
||||
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
||||
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
||||
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
|
||||
if output_type == "latent":
|
||||
return AnimateDiffPipelineOutput(frames=latents)
|
||||
|
||||
@@ -972,6 +972,12 @@ class StableDiffusionControlNetImg2ImgPipeline(
|
||||
The height in pixels of the generated image.
|
||||
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
||||
The width in pixels of the generated image.
|
||||
strength (`float`, *optional*, defaults to 0.8):
|
||||
Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
|
||||
starting point and more noise is added the higher the `strength`. The number of denoising steps depends
|
||||
on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
|
||||
process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
|
||||
essentially ignores `image`.
|
||||
num_inference_steps (`int`, *optional*, defaults to 50):
|
||||
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
||||
expense of slower inference.
|
||||
|
||||
@@ -1156,15 +1156,15 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
|
||||
The width in pixels of the generated image. Anything below 512 pixels won't work well for
|
||||
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
||||
and checkpoints that are not specifically fine-tuned on low resolutions.
|
||||
strength (`float`, *optional*, defaults to 0.8):
|
||||
Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
|
||||
starting point and more noise is added the higher the `strength`. The number of denoising steps depends
|
||||
on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
|
||||
process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
|
||||
essentially ignores `image`.
|
||||
num_inference_steps (`int`, *optional*, defaults to 50):
|
||||
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
||||
expense of slower inference.
|
||||
strength (`float`, *optional*, defaults to 0.3):
|
||||
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
|
||||
will be used as a starting point, adding more noise to it the larger the `strength`. The number of
|
||||
denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
|
||||
be maximum and the denoising process will run for the full number of iterations specified in
|
||||
`num_inference_steps`.
|
||||
guidance_scale (`float`, *optional*, defaults to 7.5):
|
||||
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
||||
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
||||
|
||||
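For reference, the `w` mentioned in that docstring is applied in every denoising loop shown in this diff as a simple interpolation between the unconditional and text-conditioned noise predictions:

# Classifier-free guidance update used throughout the denoising loops above.
# noise_pred_uncond / noise_pred_text are the two halves of the batched UNet output.
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# guidance_scale == 1.0 disables guidance; larger values follow the prompt more strongly.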
src/diffusers/pipelines/free_init_utils.py (new file, 184 lines)
@@ -0,0 +1,184 @@
|
||||
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import math
|
||||
from typing import Tuple, Union
|
||||
|
||||
import torch
|
||||
import torch.fft as fft
|
||||
|
||||
from ..utils.torch_utils import randn_tensor
|
||||
|
||||
|
||||
class FreeInitMixin:
|
||||
r"""Mixin class for FreeInit."""
|
||||
|
||||
def enable_free_init(
|
||||
self,
|
||||
num_iters: int = 3,
|
||||
use_fast_sampling: bool = False,
|
||||
method: str = "butterworth",
|
||||
order: int = 4,
|
||||
spatial_stop_frequency: float = 0.25,
|
||||
temporal_stop_frequency: float = 0.25,
|
||||
):
|
||||
"""Enables the FreeInit mechanism as in https://arxiv.org/abs/2312.07537.
|
||||
|
||||
This implementation has been adapted from the [official repository](https://github.com/TianxingWu/FreeInit).
|
||||
|
||||
Args:
|
||||
num_iters (`int`, *optional*, defaults to `3`):
|
||||
Number of FreeInit noise re-initialization iterations.
|
||||
use_fast_sampling (`bool`, *optional*, defaults to `False`):
|
||||
Whether or not to speed up the sampling procedure at the cost of possibly lower quality results. Enables
|
||||
the "Coarse-to-Fine Sampling" strategy, as mentioned in the paper, if set to `True`.
|
||||
method (`str`, *optional*, defaults to `butterworth`):
|
||||
Must be one of `butterworth`, `ideal` or `gaussian` to use as the filtering method for the
|
||||
FreeInit low pass filter.
|
||||
order (`int`, *optional*, defaults to `4`):
|
||||
Order of the filter used in `butterworth` method. Larger values lead to `ideal` method behaviour
|
||||
whereas lower values lead to `gaussian` method behaviour.
|
||||
spatial_stop_frequency (`float`, *optional*, defaults to `0.25`):
|
||||
Normalized stop frequency for spatial dimensions. Must be between 0 to 1. Referred to as `d_s` in
|
||||
the original implementation.
|
||||
temporal_stop_frequency (`float`, *optional*, defaults to `0.25`):
|
||||
Normalized stop frequency for temporal dimensions. Must be between 0 to 1. Referred to as `d_t` in
|
||||
the original implementation.
|
||||
"""
|
||||
self._free_init_num_iters = num_iters
|
||||
self._free_init_use_fast_sampling = use_fast_sampling
|
||||
self._free_init_method = method
|
||||
self._free_init_order = order
|
||||
self._free_init_spatial_stop_frequency = spatial_stop_frequency
|
||||
self._free_init_temporal_stop_frequency = temporal_stop_frequency
|
||||
|
||||
def disable_free_init(self):
|
||||
"""Disables the FreeInit mechanism if enabled."""
|
||||
self._free_init_num_iters = None
|
||||
|
||||
@property
|
||||
def free_init_enabled(self):
|
||||
return hasattr(self, "_free_init_num_iters") and self._free_init_num_iters is not None
|
||||
|
||||
def _get_free_init_freq_filter(
|
||||
self,
|
||||
shape: Tuple[int, ...],
|
||||
device: Union[str, torch.device],
|
||||
filter_type: str,
|
||||
order: float,
|
||||
spatial_stop_frequency: float,
|
||||
temporal_stop_frequency: float,
|
||||
) -> torch.Tensor:
|
||||
r"""Returns the FreeInit filter based on filter type and other input conditions."""
|
||||
|
||||
time, height, width = shape[-3], shape[-2], shape[-1]
|
||||
mask = torch.zeros(shape)
|
||||
|
||||
if spatial_stop_frequency == 0 or temporal_stop_frequency == 0:
|
||||
return mask
|
||||
|
||||
if filter_type == "butterworth":
|
||||
|
||||
def retrieve_mask(x):
|
||||
return 1 / (1 + (x / spatial_stop_frequency**2) ** order)
|
||||
elif filter_type == "gaussian":
|
||||
|
||||
def retrieve_mask(x):
|
||||
return math.exp(-1 / (2 * spatial_stop_frequency**2) * x)
|
||||
elif filter_type == "ideal":
|
||||
|
||||
def retrieve_mask(x):
|
||||
return 1 if x <= spatial_stop_frequency * 2 else 0
|
||||
else:
|
||||
raise NotImplementedError("`filter_type` must be one of gaussian, butterworth or ideal")
|
||||
|
||||
for t in range(time):
|
||||
for h in range(height):
|
||||
for w in range(width):
|
||||
d_square = (
|
||||
((spatial_stop_frequency / temporal_stop_frequency) * (2 * t / time - 1)) ** 2
|
||||
+ (2 * h / height - 1) ** 2
|
||||
+ (2 * w / width - 1) ** 2
|
||||
)
|
||||
mask[..., t, h, w] = retrieve_mask(d_square)
|
||||
|
||||
return mask.to(device)
|
||||
|
||||
def _apply_freq_filter(self, x: torch.Tensor, noise: torch.Tensor, low_pass_filter: torch.Tensor) -> torch.Tensor:
|
||||
r"""Noise reinitialization."""
|
||||
# FFT
|
||||
x_freq = fft.fftn(x, dim=(-3, -2, -1))
|
||||
x_freq = fft.fftshift(x_freq, dim=(-3, -2, -1))
|
||||
noise_freq = fft.fftn(noise, dim=(-3, -2, -1))
|
||||
noise_freq = fft.fftshift(noise_freq, dim=(-3, -2, -1))
|
||||
|
||||
# frequency mix
|
||||
high_pass_filter = 1 - low_pass_filter
|
||||
x_freq_low = x_freq * low_pass_filter
|
||||
noise_freq_high = noise_freq * high_pass_filter
|
||||
x_freq_mixed = x_freq_low + noise_freq_high # mix in freq domain
|
||||
|
||||
# IFFT
|
||||
x_freq_mixed = fft.ifftshift(x_freq_mixed, dim=(-3, -2, -1))
|
||||
x_mixed = fft.ifftn(x_freq_mixed, dim=(-3, -2, -1)).real
|
||||
|
||||
return x_mixed
|
||||
|
||||
    def _apply_free_init(
        self,
        latents: torch.Tensor,
        free_init_iteration: int,
        num_inference_steps: int,
        device: torch.device,
        dtype: torch.dtype,
        generator: torch.Generator,
    ):
        if free_init_iteration == 0:
            self._free_init_initial_noise = latents.detach().clone()
            return latents, self.scheduler.timesteps

        latent_shape = latents.shape

        free_init_filter_shape = (1, *latent_shape[1:])
        free_init_freq_filter = self._get_free_init_freq_filter(
            shape=free_init_filter_shape,
            device=device,
            filter_type=self._free_init_method,
            order=self._free_init_order,
            spatial_stop_frequency=self._free_init_spatial_stop_frequency,
            temporal_stop_frequency=self._free_init_temporal_stop_frequency,
        )

        current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1
        diffuse_timesteps = torch.full((latent_shape[0],), current_diffuse_timestep).long()

        z_t = self.scheduler.add_noise(
            original_samples=latents, noise=self._free_init_initial_noise, timesteps=diffuse_timesteps.to(device)
        ).to(dtype=torch.float32)

        z_rand = randn_tensor(
            shape=latent_shape,
            generator=generator,
            device=device,
            dtype=torch.float32,
        )
        latents = self._apply_freq_filter(z_t, z_rand, low_pass_filter=free_init_freq_filter)
        latents = latents.to(dtype)

        # Coarse-to-Fine Sampling for faster inference (can lead to lower quality)
        if self._free_init_use_fast_sampling:
            num_inference_steps = int(num_inference_steps / self._free_init_num_iters * (free_init_iteration + 1))
            self.scheduler.set_timesteps(num_inference_steps, device=device)

        return latents, self.scheduler.timesteps
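A minimal usage sketch for the mixin above, assuming a video pipeline that inherits it (the AnimateDiff tests later in this diff exercise the same API); the model ids, prompt, and output path are illustrative, not part of this change:

import torch
from diffusers import AnimateDiffPipeline, MotionAdapter
from diffusers.utils import export_to_gif

# assumed checkpoints, chosen only for illustration
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
pipe = AnimateDiffPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", motion_adapter=adapter, torch_dtype=torch.float16
).to("cuda")

# enable FreeInit noise reinitialization with the new mixin signature (no generator argument)
pipe.enable_free_init(
    num_iters=3,
    use_fast_sampling=False,
    method="butterworth",
    order=4,
    spatial_stop_frequency=0.25,
    temporal_stop_frequency=0.25,
)
frames = pipe(prompt="a panda surfing, high quality", num_inference_steps=25).frames[0]
pipe.disable_free_init()  # restore the default denoising behaviour
export_to_gif(frames, "freeinit_sample.gif")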
@@ -45,6 +45,7 @@ from ...utils import (
    unscale_lora_layers,
)
from ...utils.torch_utils import randn_tensor
from ..free_init_utils import FreeInitMixin
from ..pipeline_utils import DiffusionPipeline

@@ -210,7 +211,7 @@ class PIAPipelineOutput(BaseOutput):


class PIAPipeline(
    DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin, FromSingleFileMixin
    DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin, FromSingleFileMixin, FreeInitMixin
):
    r"""
    Pipeline for text-to-video generation.
@@ -560,58 +561,6 @@ class PIAPipeline(
        """Disables the FreeU mechanism if enabled."""
        self.unet.disable_freeu()

    @property
    def free_init_enabled(self):
        return hasattr(self, "_free_init_num_iters") and self._free_init_num_iters is not None

    def enable_free_init(
        self,
        num_iters: int = 3,
        use_fast_sampling: bool = False,
        method: str = "butterworth",
        order: int = 4,
        spatial_stop_frequency: float = 0.25,
        temporal_stop_frequency: float = 0.25,
        generator: Optional[torch.Generator] = None,
    ):
        """Enables the FreeInit mechanism as in https://arxiv.org/abs/2312.07537.

        This implementation has been adapted from the [official repository](https://github.com/TianxingWu/FreeInit).

        Args:
            num_iters (`int`, *optional*, defaults to `3`):
                Number of FreeInit noise re-initialization iterations.
            use_fast_sampling (`bool`, *optional*, defaults to `False`):
                Whether or not to speedup sampling procedure at the cost of probably lower quality results. Enables
                the "Coarse-to-Fine Sampling" strategy, as mentioned in the paper, if set to `True`.
            method (`str`, *optional*, defaults to `butterworth`):
                Must be one of `butterworth`, `ideal` or `gaussian` to use as the filtering method for the
                FreeInit low pass filter.
            order (`int`, *optional*, defaults to `4`):
                Order of the filter used in `butterworth` method. Larger values lead to `ideal` method behaviour
                whereas lower values lead to `gaussian` method behaviour.
            spatial_stop_frequency (`float`, *optional*, defaults to `0.25`):
                Normalized stop frequency for spatial dimensions. Must be between 0 to 1. Referred to as `d_s` in
                the original implementation.
            temporal_stop_frequency (`float`, *optional*, defaults to `0.25`):
                Normalized stop frequency for temporal dimensions. Must be between 0 to 1. Referred to as `d_t` in
                the original implementation.
            generator (`torch.Generator`, *optional*, defaults to `0.25`):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                FreeInit generation deterministic.
        """
        self._free_init_num_iters = num_iters
        self._free_init_use_fast_sampling = use_fast_sampling
        self._free_init_method = method
        self._free_init_order = order
        self._free_init_spatial_stop_frequency = spatial_stop_frequency
        self._free_init_temporal_stop_frequency = temporal_stop_frequency
        self._free_init_generator = generator

    def disable_free_init(self):
        """Disables the FreeInit mechanism if enabled."""
        self._free_init_num_iters = None

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
@@ -795,143 +744,6 @@ class PIAPipeline(

        return mask, masked_image

    def _denoise_loop(
        self,
        timesteps,
        num_inference_steps,
        do_classifier_free_guidance,
        guidance_scale,
        num_warmup_steps,
        prompt_embeds,
        negative_prompt_embeds,
        latents,
        mask,
        masked_image,
        cross_attention_kwargs,
        added_cond_kwargs,
        extra_step_kwargs,
        callback_on_step_end,
        callback_on_step_end_tensor_inputs,
    ):
        """Denoising loop for PIA."""
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                latent_model_input = torch.cat([latent_model_input, mask, masked_image], dim=1)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    added_cond_kwargs=added_cond_kwargs,
                ).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

        return latents

    def _free_init_loop(
        self,
        height,
        width,
        num_frames,
        batch_size,
        num_videos_per_prompt,
        denoise_args,
        device,
    ):
        """Denoising loop for PIA using FreeInit noise reinitialization technique."""

        latents = denoise_args.get("latents")
        prompt_embeds = denoise_args.get("prompt_embeds")
        timesteps = denoise_args.get("timesteps")
        num_inference_steps = denoise_args.get("num_inference_steps")

        latent_shape = (
            batch_size * num_videos_per_prompt,
            4,
            num_frames,
            height // self.vae_scale_factor,
            width // self.vae_scale_factor,
        )
        free_init_filter_shape = (
            1,
            4,
            num_frames,
            height // self.vae_scale_factor,
            width // self.vae_scale_factor,
        )
        free_init_freq_filter = _get_freeinit_freq_filter(
            shape=free_init_filter_shape,
            device=device,
            filter_type=self._free_init_method,
            order=self._free_init_order,
            spatial_stop_frequency=self._free_init_spatial_stop_frequency,
            temporal_stop_frequency=self._free_init_temporal_stop_frequency,
        )

        with self.progress_bar(total=self._free_init_num_iters) as free_init_progress_bar:
            for i in range(self._free_init_num_iters):
                # For the first FreeInit iteration, the original latent is used without modification.
                # Subsequent iterations apply the noise reinitialization technique.
                if i == 0:
                    initial_noise = latents.detach().clone()
                else:
                    current_diffuse_timestep = (
                        self.scheduler.config.num_train_timesteps - 1
                    )  # diffuse to t=999 noise level
                    diffuse_timesteps = torch.full((batch_size,), current_diffuse_timestep).long()
                    z_T = self.scheduler.add_noise(
                        original_samples=latents, noise=initial_noise, timesteps=diffuse_timesteps.to(device)
                    ).to(dtype=torch.float32)
                    z_rand = randn_tensor(
                        shape=latent_shape,
                        generator=self._free_init_generator,
                        device=device,
                        dtype=torch.float32,
                    )
                    latents = _freq_mix_3d(z_T, z_rand, LPF=free_init_freq_filter)
                    latents = latents.to(prompt_embeds.dtype)

                # Coarse-to-Fine Sampling for faster inference (can lead to lower quality)
                if self._free_init_use_fast_sampling:
                    current_num_inference_steps = int(num_inference_steps / self._free_init_num_iters * (i + 1))
                    self.scheduler.set_timesteps(current_num_inference_steps, device=device)
                    timesteps = self.scheduler.timesteps
                    denoise_args.update({"timesteps": timesteps, "num_inference_steps": current_num_inference_steps})

                num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
                denoise_args.update({"latents": latents, "num_warmup_steps": num_warmup_steps})
                latents = self._denoise_loop(**denoise_args)

                free_init_progress_bar.update()

        return latents

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
@@ -944,19 +756,6 @@ class PIAPipeline(

        return timesteps, num_inference_steps - t_start

    def _retrieve_video_frames(self, latents, output_type, return_dict):
        """Helper function to handle latents to output conversion."""
        if output_type == "latent":
            return PIAPipelineOutput(frames=latents)

        video_tensor = self.decode_latents(latents)
        video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)

        if not return_dict:
            return (video,)

        return PIAPipelineOutput(frames=video)

    @property
    def guidance_scale(self):
        return self._guidance_scale
@@ -1191,41 +990,62 @@ class PIAPipeline(
        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None

        # 8. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        denoise_args = {
            "timesteps": timesteps,
            "num_inference_steps": num_inference_steps,
            "do_classifier_free_guidance": self.do_classifier_free_guidance,
            "guidance_scale": guidance_scale,
            "num_warmup_steps": num_warmup_steps,
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "latents": latents,
            "mask": mask,
            "masked_image": masked_image,
            "cross_attention_kwargs": self.cross_attention_kwargs,
            "added_cond_kwargs": added_cond_kwargs,
            "extra_step_kwargs": extra_step_kwargs,
            "callback_on_step_end": callback_on_step_end,
            "callback_on_step_end_tensor_inputs": callback_on_step_end_tensor_inputs,
        }
        num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1
        for free_init_iter in range(num_free_init_iters):
            if self.free_init_enabled:
                latents, timesteps = self._apply_free_init(
                    latents, free_init_iter, num_inference_steps, device, latents.dtype, generator
                )

        if self.free_init_enabled:
            latents = self._free_init_loop(
                height=height,
                width=width,
                num_frames=num_frames,
                batch_size=batch_size,
                num_videos_per_prompt=num_videos_per_prompt,
                denoise_args=denoise_args,
                device=device,
            )
        else:
            latents = self._denoise_loop(**denoise_args)
            num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
            with self.progress_bar(total=num_inference_steps) as progress_bar:
                for i, t in enumerate(timesteps):
                    # expand the latents if we are doing classifier free guidance
                    latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                    latent_model_input = torch.cat([latent_model_input, mask, masked_image], dim=1)

        video = self._retrieve_video_frames(latents, output_type, return_dict)
                    # predict the noise residual
                    noise_pred = self.unet(
                        latent_model_input,
                        t,
                        encoder_hidden_states=prompt_embeds,
                        cross_attention_kwargs=cross_attention_kwargs,
                        added_cond_kwargs=added_cond_kwargs,
                    ).sample

                    # perform guidance
                    if self.do_classifier_free_guidance:
                        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                    # compute the previous noisy sample x_t -> x_t-1
                    latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                    if callback_on_step_end is not None:
                        callback_kwargs = {}
                        for k in callback_on_step_end_tensor_inputs:
                            callback_kwargs[k] = locals()[k]
                        callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                        latents = callback_outputs.pop("latents", latents)
                        prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                        negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                    # call the callback, if provided
                    if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                        progress_bar.update()

        if output_type == "latent":
            return PIAPipelineOutput(frames=latents)

        video_tensor = self.decode_latents(latents)
        video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)

        # 9. Offload all models
        self.maybe_free_model_hooks()

        return video
        if not return_dict:
            return (video,)

        return PIAPipelineOutput(frames=video)
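The denoising loop above routes `callback_on_step_end` and `callback_on_step_end_tensor_inputs` through each step; a hedged sketch of a step callback that inspects the running latents (the function name is invented for illustration, and `pipe` / `inputs` are assumed to be set up elsewhere with the pipeline's usual arguments such as prompt and conditioning image):

def log_step(pipeline, step_index, timestep, callback_kwargs):
    # callback_kwargs holds the tensors listed in callback_on_step_end_tensor_inputs;
    # the returned dict entries are popped back into the loop, so latents could also be edited here
    latents = callback_kwargs["latents"]
    print(f"step {step_index} (t={int(timestep)}): latents shape {tuple(latents.shape)}")
    return callback_kwargs

output = pipe(
    **inputs,  # usual pipeline inputs (prompt, image, ...), assumed defined
    callback_on_step_end=log_step,
    callback_on_step_end_tensor_inputs=["latents"],
)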
@@ -553,13 +553,15 @@ class StableDiffusionInstructPix2PixPipeline(
            else:
                attention_mask = None

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
            prompt_embeds = prompt_embeds[0]

        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        else:
            prompt_embeds_dtype = self.unet.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
@@ -615,7 +617,7 @@ class StableDiffusionInstructPix2PixPipeline(
        # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
        seq_len = negative_prompt_embeds.shape[1]

        negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
        negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
        negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
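The InstructPix2Pix change above stops assuming a text encoder is present when picking the embedding dtype; a small hedged sketch of the same fallback pattern as a standalone helper (the function name is invented for illustration):

import torch

def resolve_prompt_embeds_dtype(pipe) -> torch.dtype:
    # prefer the text encoder's dtype, but fall back to the UNet's dtype when the
    # text encoder has been removed or offloaded (e.g. prompt_embeds passed in directly)
    if getattr(pipe, "text_encoder", None) is not None:
        return pipe.text_encoder.dtype
    return pipe.unet.dtype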
@@ -132,15 +132,15 @@ class StableVideoDiffusionPipeline(DiffusionPipeline):
            image = _resize_with_antialiasing(image, (224, 224))
            image = (image + 1.0) / 2.0

            # Normalize the image with for CLIP input
            image = self.feature_extractor(
                images=image,
                do_normalize=True,
                do_center_crop=False,
                do_resize=False,
                do_rescale=False,
                return_tensors="pt",
            ).pixel_values
        # Normalize the image with for CLIP input
        image = self.feature_extractor(
            images=image,
            do_normalize=True,
            do_center_crop=False,
            do_resize=False,
            do_rescale=False,
            return_tensors="pt",
        ).pixel_values

        image = image.to(device=device, dtype=dtype)
        image_embeddings = self.image_encoder(image).image_embeds
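A hedged, standalone sketch of the CLIP normalization step shown above: by this point the frame has already been resized to 224x224 and mapped from [-1, 1] to [0, 1], so every geometric and rescaling step in the processor is disabled and only mean/std normalization runs (the random tensor stands in for a real frame):

import torch
from transformers import CLIPImageProcessor

feature_extractor = CLIPImageProcessor()  # default CLIP vision preprocessing config
frame = torch.rand(1, 3, 224, 224)        # stand-in for a resized video frame in [0, 1]
pixel_values = feature_extractor(
    images=frame,
    do_normalize=True,      # only CLIP mean/std normalization is applied
    do_center_crop=False,
    do_resize=False,
    do_rescale=False,       # values are already in [0, 1]
    return_tensors="pt",
).pixel_values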
@@ -333,8 +333,7 @@ class StableVideoDiffusionPipeline(DiffusionPipeline):

        Args:
            image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`):
                Image or images to guide image generation. If you provide a tensor, it needs to be compatible with
                [`CLIPImageProcessor`](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json).
                Image or images to guide image generation. If you provide a tensor, the expected value range is between `[0,1]`.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):

@@ -21,7 +21,7 @@ from typing import Any, Tuple

import numpy as np

from .import_utils import is_torch_available
from .import_utils import is_torch_available, is_torch_version


def is_tensor(x) -> bool:
@@ -60,11 +60,18 @@ class BaseOutput(OrderedDict):
        if is_torch_available():
            import torch.utils._pytree

            torch.utils._pytree._register_pytree_node(
                cls,
                torch.utils._pytree._dict_flatten,
                lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),
            )
            if is_torch_version("<", "2.2"):
                torch.utils._pytree._register_pytree_node(
                    cls,
                    torch.utils._pytree._dict_flatten,
                    lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),
                )
            else:
                torch.utils._pytree.register_pytree_node(
                    cls,
                    torch.utils._pytree._dict_flatten,
                    lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),
                )

    def __post_init__(self) -> None:
        class_fields = fields(self)
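The hunk above gates the pytree registration on the installed torch version; a hedged sketch of the same pattern as a standalone helper (the function name is invented for illustration):

import torch
import torch.utils._pytree
from diffusers.utils import is_torch_version

def register_as_pytree_node(cls):
    # torch < 2.2 only exposes the private _register_pytree_node helper;
    # newer releases provide the public register_pytree_node entry point
    if is_torch_version("<", "2.2"):
        register = torch.utils._pytree._register_pytree_node
    else:
        register = torch.utils._pytree.register_pytree_node
    register(
        cls,
        torch.utils._pytree._dict_flatten,
        lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),
    )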
@@ -242,7 +242,6 @@ class AnimateDiffPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
        inputs_normal = self.get_dummy_inputs(torch_device)
        frames_normal = pipe(**inputs_normal).frames[0]

        free_init_generator = torch.Generator(device=torch_device).manual_seed(0)
        pipe.enable_free_init(
            num_iters=2,
            use_fast_sampling=True,
@@ -250,7 +249,6 @@ class AnimateDiffPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
            order=4,
            spatial_stop_frequency=0.25,
            temporal_stop_frequency=0.25,
            generator=free_init_generator,
        )
        inputs_enable_free_init = self.get_dummy_inputs(torch_device)
        frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0]

@@ -267,3 +267,38 @@ class AnimateDiffVideoToVideoPipelineFastTests(PipelineTesterMixin, unittest.Tes

        max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
        self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results")

    def test_free_init(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.set_progress_bar_config(disable=None)
        pipe.to(torch_device)

        inputs_normal = self.get_dummy_inputs(torch_device)
        frames_normal = pipe(**inputs_normal).frames[0]

        pipe.enable_free_init(
            num_iters=2,
            use_fast_sampling=True,
            method="butterworth",
            order=4,
            spatial_stop_frequency=0.25,
            temporal_stop_frequency=0.25,
        )
        inputs_enable_free_init = self.get_dummy_inputs(torch_device)
        frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0]

        pipe.disable_free_init()
        inputs_disable_free_init = self.get_dummy_inputs(torch_device)
        frames_disable_free_init = pipe(**inputs_disable_free_init).frames[0]

        sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum()
        max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_init)).max()
        self.assertGreater(
            sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results"
        )
        self.assertLess(
            max_diff_disabled,
            1e-4,
            "Disabling of FreeInit should lead to results similar to the default pipeline results",
        )
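Outside the test harness, the same enable/disable round-trip can be checked directly; a hedged sketch, assuming `pipe` is a FreeInit-capable pipeline and `get_inputs()` is a user-defined helper returning deterministic inputs with `output_type="np"`:

import numpy as np

baseline = np.array(pipe(**get_inputs()).frames[0])

pipe.enable_free_init(num_iters=2, use_fast_sampling=True)
with_free_init = np.array(pipe(**get_inputs()).frames[0])

pipe.disable_free_init()
restored = np.array(pipe(**get_inputs()).frames[0])

# FreeInit should change the result; disabling it should restore the baseline
assert np.abs(baseline - with_free_init).sum() > 1e1
assert np.abs(baseline - restored).max() < 1e-4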
@@ -31,6 +31,7 @@ from diffusers import (
    StableDiffusionXLInpaintPipeline,
    StableDiffusionXLPipeline,
)
from diffusers.image_processor import IPAdapterMaskProcessor
from diffusers.models.attention_processor import AttnProcessor, AttnProcessor2_0
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
@@ -64,7 +65,7 @@ class IPAdapterNightlyTestsMixin(unittest.TestCase):
        image_processor = CLIPImageProcessor.from_pretrained(repo_id)
        return image_processor

    def get_dummy_inputs(self, for_image_to_image=False, for_inpainting=False, for_sdxl=False):
    def get_dummy_inputs(self, for_image_to_image=False, for_inpainting=False, for_sdxl=False, for_masks=False):
        image = load_image(
            "https://user-images.githubusercontent.com/24734142/266492875-2d50d223-8475-44f0-a7c6-08b51cb53572.png"
        )
@@ -101,6 +102,22 @@ class IPAdapterNightlyTestsMixin(unittest.TestCase):

            input_kwargs.update({"image": image, "mask_image": mask, "ip_adapter_image": ip_image})

        elif for_masks:
            face_image1 = load_image(
                "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl1.png"
            )
            face_image2 = load_image(
                "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl2.png"
            )
            mask1 = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask1.png")
            mask2 = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask2.png")
            input_kwargs.update(
                {
                    "ip_adapter_image": [[face_image1], [face_image2]],
                    "cross_attention_kwargs": {"ip_adapter_masks": [mask1, mask2]},
                }
            )

        return input_kwargs

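A hedged usage sketch of the mask inputs assembled above, assuming `pipeline` already has the IP-Adapters loaded (as in the tests that follow) and `face_image1`, `face_image2`, `mask1`, `mask2` are the PIL images from the URLs above; the prompt is illustrative:

from diffusers.image_processor import IPAdapterMaskProcessor

processor = IPAdapterMaskProcessor()
masks = processor.preprocess([mask1, mask2])   # PIL masks -> stacked mask tensor

images = pipeline(
    prompt="two girls in a park",              # illustrative prompt, not from the tests
    ip_adapter_image=[[face_image1], [face_image2]],
    cross_attention_kwargs={"ip_adapter_masks": masks},
).images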
@@ -465,3 +482,58 @@ class IPAdapterSDXLIntegrationTests(IPAdapterNightlyTestsMixin):

        max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice)
        assert max_diff < 5e-4

    def test_ip_adapter_single_mask(self):
        image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
        pipeline = StableDiffusionXLPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0",
            image_encoder=image_encoder,
            torch_dtype=self.dtype,
        )
        pipeline.to(torch_device)
        pipeline.load_ip_adapter(
            "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus-face_sdxl_vit-h.safetensors"
        )
        pipeline.set_ip_adapter_scale(0.7)

        inputs = self.get_dummy_inputs(for_masks=True)
        mask = inputs["cross_attention_kwargs"]["ip_adapter_masks"][0]
        processor = IPAdapterMaskProcessor()
        mask = processor.preprocess(mask)
        inputs["cross_attention_kwargs"]["ip_adapter_masks"] = mask
        inputs["ip_adapter_image"] = inputs["ip_adapter_image"][0]
        images = pipeline(**inputs).images
        image_slice = images[0, :3, :3, -1].flatten()
        expected_slice = np.array(
            [0.7307304, 0.73450166, 0.73731124, 0.7377061, 0.7318013, 0.73720926, 0.74746597, 0.7409929, 0.74074936]
        )

        max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice)
        assert max_diff < 5e-4

    def test_ip_adapter_multiple_masks(self):
        image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
        pipeline = StableDiffusionXLPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0",
            image_encoder=image_encoder,
            torch_dtype=self.dtype,
        )
        pipeline.to(torch_device)
        pipeline.load_ip_adapter(
            "h94/IP-Adapter", subfolder="sdxl_models", weight_name=["ip-adapter-plus-face_sdxl_vit-h.safetensors"] * 2
        )
        pipeline.set_ip_adapter_scale([0.7] * 2)

        inputs = self.get_dummy_inputs(for_masks=True)
        masks = inputs["cross_attention_kwargs"]["ip_adapter_masks"]
        processor = IPAdapterMaskProcessor()
        masks = processor.preprocess(masks)
        inputs["cross_attention_kwargs"]["ip_adapter_masks"] = masks
        images = pipeline(**inputs).images
        image_slice = images[0, :3, :3, -1].flatten()
        expected_slice = np.array(
            [0.79474676, 0.7977683, 0.8013954, 0.7988008, 0.7970615, 0.8029355, 0.80614823, 0.8050743, 0.80627424]
        )

        max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice)
        assert max_diff < 5e-4

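These tests compare image slices with `numpy_cosine_similarity_distance`; a hedged re-implementation of that metric for reference (the `_like` suffix marks it as an approximation written here, not the library helper itself):

import numpy as np

def numpy_cosine_similarity_distance_like(a, b):
    # 1 - cosine similarity between the two flattened slices
    a = np.asarray(a, dtype=np.float64).ravel()
    b = np.asarray(b, dtype=np.float64).ravel()
    return 1.0 - float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))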
@@ -255,7 +255,6 @@ class PIAPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
        inputs_normal = self.get_dummy_inputs(torch_device)
        frames_normal = pipe(**inputs_normal).frames[0]

        free_init_generator = torch.Generator(device=torch_device).manual_seed(0)
        pipe.enable_free_init(
            num_iters=2,
            use_fast_sampling=True,
@@ -263,7 +262,6 @@ class PIAPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
            order=4,
            spatial_stop_frequency=0.25,
            temporal_stop_frequency=0.25,
            generator=free_init_generator,
        )
        inputs_enable_free_init = self.get_dummy_inputs(torch_device)
        frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0]

@@ -27,7 +27,13 @@ from diffusers import (
    PixArtAlphaPipeline,
    Transformer2DModel,
)
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    numpy_cosine_similarity_distance,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, to_np
@@ -332,37 +338,35 @@ class PixArtAlphaPipelineIntegrationTests(unittest.TestCase):
        torch.cuda.empty_cache()

    def test_pixart_1024(self):
        generator = torch.manual_seed(0)
        generator = torch.Generator("cpu").manual_seed(0)

        pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        prompt = self.prompt

        image = pipe(prompt, generator=generator, output_type="np").images
        image = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").images

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0742, 0.0835, 0.2114, 0.0295, 0.0784, 0.2361, 0.1738, 0.2251, 0.3589])

        expected_slice = np.array([0.1941, 0.2117, 0.2188, 0.1946, 0.218, 0.2124, 0.199, 0.2437, 0.2583])

        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        max_diff = numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice)
        self.assertLessEqual(max_diff, 1e-4)

    def test_pixart_512(self):
        generator = torch.manual_seed(0)
        generator = torch.Generator("cpu").manual_seed(0)

        pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_512, torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        prompt = self.prompt

        image = pipe(prompt, generator=generator, output_type="np").images
        image = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").images

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3477, 0.3882, 0.4541, 0.3413, 0.3821, 0.4463, 0.4001, 0.4409, 0.4958])

        expected_slice = np.array([0.2637, 0.291, 0.2939, 0.207, 0.2512, 0.2783, 0.2168, 0.2324, 0.2817])

        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        max_diff = numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice)
        self.assertLessEqual(max_diff, 1e-4)

    def test_pixart_1024_without_resolution_binning(self):
        generator = torch.manual_seed(0)
@@ -372,7 +376,7 @@ class PixArtAlphaPipelineIntegrationTests(unittest.TestCase):

        prompt = self.prompt
        height, width = 1024, 768
        num_inference_steps = 10
        num_inference_steps = 2

        image = pipe(
            prompt,
@@ -406,7 +410,7 @@ class PixArtAlphaPipelineIntegrationTests(unittest.TestCase):

        prompt = self.prompt
        height, width = 512, 768
        num_inference_steps = 10
        num_inference_steps = 2

        image = pipe(
            prompt,