Mirror of https://github.com/huggingface/diffusers.git, synced 2025-12-14 00:14:23 +08:00

Compare commits: tests-cons...add-sharde (274 commits)
.github/workflows/benchmark.yml (vendored, 2 changed lines)

@@ -39,7 +39,7 @@ jobs:
 python utils/print_env.py
 - name: Diffusers Benchmarking
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.DIFFUSERS_BOT_TOKEN }}
+HF_TOKEN: ${{ secrets.DIFFUSERS_BOT_TOKEN }}
 BASE_PATH: benchmark_outputs
 run: |
 export TOTAL_GPU_MEMORY=$(python -c "import torch; print(torch.cuda.get_device_properties(0).total_memory / (1024**3))")
.github/workflows/build_docker_images.yml (vendored, 24 changed lines)

@@ -69,6 +69,7 @@ jobs:
 - diffusers-flax-tpu
 - diffusers-onnxruntime-cpu
 - diffusers-onnxruntime-cuda
+- diffusers-doc-builder

 steps:
 - name: Checkout repository

@@ -90,24 +91,11 @@ jobs:

 - name: Post to a Slack channel
 id: slack
-uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001
+uses: huggingface/hf-workflows/.github/actions/post-slack@main
 with:
 # Slack channel id, channel name, or user id to post message.
 # See also: https://api.slack.com/methods/chat.postMessage#channels
-channel-id: ${{ env.CI_SLACK_CHANNEL }}
-# For posting a rich message using Block Kit
-payload: |
-{
-"text": "${{ matrix.image-name }} Docker Image build result: ${{ job.status }}\n${{ github.event.head_commit.url }}",
-"blocks": [
-{
-"type": "section",
-"text": {
-"type": "mrkdwn",
-"text": "${{ matrix.image-name }} Docker Image build result: ${{ job.status }}\n${{ github.event.head_commit.url }}"
-}
-}
-]
-}
-env:
-SLACK_BOT_TOKEN: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
+slack_channel: ${{ env.CI_SLACK_CHANNEL }}
+title: "🤗 Results of the ${{ matrix.image-name }} Docker Image build"
+status: ${{ job.status }}
+slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
.github/workflows/build_documentation.yml (vendored, 2 changed lines)

@@ -21,7 +21,7 @@ jobs:
 package: diffusers
 notebook_folder: diffusers_doc
 languages: en ko zh ja pt
-
+custom_container: diffusers/diffusers-doc-builder
 secrets:
 token: ${{ secrets.HUGGINGFACE_PUSH }}
 hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
.github/workflows/build_pr_documentation.yml (vendored, 1 changed line)

@@ -20,3 +20,4 @@ jobs:
 install_libgl1: true
 package: diffusers
 languages: en ko zh ja pt
+custom_container: diffusers/diffusers-doc-builder
.github/workflows/mirror_community_pipeline.yml (vendored, new file, 89 lines)

@@ -0,0 +1,89 @@
+name: Mirror Community Pipeline
+
+on:
+# Push changes on the main branch
+push:
+branches:
+- main
+paths:
+- 'examples/community/**.py'
+
+# And on tag creation (e.g. `v0.28.1`)
+tags:
+- '*'
+
+# Manual trigger with ref input
+workflow_dispatch:
+inputs:
+ref:
+description: "Either 'main' or a tag ref"
+required: true
+default: 'main'
+
+jobs:
+mirror_community_pipeline:
+runs-on: ubuntu-latest
+steps:
+# Checkout to correct ref
+# If workflow dispatch
+# If ref is 'main', set:
+# CHECKOUT_REF=refs/heads/main
+# PATH_IN_REPO=main
+# Else it must be a tag. Set:
+# CHECKOUT_REF=refs/tags/{tag}
+# PATH_IN_REPO={tag}
+# If not workflow dispatch
+# If ref is 'refs/heads/main' => set 'main'
+# Else it must be a tag => set {tag}
+- name: Set checkout_ref and path_in_repo
+run: |
+if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
+if [ -z "${{ github.event.inputs.ref }}" ]; then
+echo "Error: Missing ref input"
+exit 1
+elif [ "${{ github.event.inputs.ref }}" == "main" ]; then
+echo "CHECKOUT_REF=refs/heads/main" >> $GITHUB_ENV
+echo "PATH_IN_REPO=main" >> $GITHUB_ENV
+else
+echo "CHECKOUT_REF=refs/tags/${{ github.event.inputs.ref }}" >> $GITHUB_ENV
+echo "PATH_IN_REPO=${{ github.event.inputs.ref }}" >> $GITHUB_ENV
+fi
+elif [ "${{ github.ref }}" == "refs/heads/main" ]; then
+echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
+echo "PATH_IN_REPO=main" >> $GITHUB_ENV
+else
+# e.g. refs/tags/v0.28.1 -> v0.28.1
+echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
+echo "PATH_IN_REPO=$(echo ${{ github.ref }} | sed 's/^refs\/tags\///')" >> $GITHUB_ENV
+fi
+- name: Print env vars
+run: |
+echo "CHECKOUT_REF: ${{ env.CHECKOUT_REF }}"
+echo "PATH_IN_REPO: ${{ env.PATH_IN_REPO }}"
+- uses: actions/checkout@v3
+with:
+ref: ${{ env.CHECKOUT_REF }}
+
+# Setup + install dependencies
+- name: Set up Python
+uses: actions/setup-python@v4
+with:
+python-version: "3.10"
+- name: Install dependencies
+run: |
+python -m pip install --upgrade pip
+pip install --upgrade huggingface_hub
+
+# Check secret is set
+- name: whoami
+run: huggingface-cli whoami
+env:
+HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
+
+# Push to HF! (under subfolder based on checkout ref)
+# https://huggingface.co/datasets/diffusers/community-pipelines-mirror
+- name: Mirror community pipeline to HF
+run: huggingface-cli upload diffusers/community-pipelines-mirror ./examples/community ${PATH_IN_REPO} --repo-type dataset
+env:
+PATH_IN_REPO: ${{ env.PATH_IN_REPO }}
+HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
.github/workflows/nightly_tests.yml (vendored, 24 changed lines)

@@ -19,7 +19,7 @@ env:
 jobs:
 setup_torch_cuda_pipeline_matrix:
 name: Setup Torch Pipelines Matrix
-runs-on: ubuntu-latest
+runs-on: diffusers/diffusers-pytorch-cpu
 outputs:
 pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
 steps:

@@ -59,7 +59,7 @@ jobs:
 runs-on: [single-gpu, nvidia-gpu, t4, ci]
 container:
 image: diffusers/diffusers-pytorch-cuda
-options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0
+options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0
 steps:
 - name: Checkout diffusers
 uses: actions/checkout@v3

@@ -81,7 +81,7 @@ jobs:

 - name: Nightly PyTorch CUDA checkpoint (pipelines) tests
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
 CUBLAS_WORKSPACE_CONFIG: :16:8
 run: |

@@ -112,7 +112,7 @@ jobs:

 run_nightly_tests_for_other_torch_modules:
 name: Torch Non-Pipelines CUDA Nightly Tests
-runs-on: docker-gpu
+runs-on: [single-gpu, nvidia-gpu, t4, ci]
 container:
 image: diffusers/diffusers-pytorch-cuda
 options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0

@@ -141,7 +141,7 @@ jobs:
 - name: Run nightly PyTorch CUDA tests for non-pipeline modules
 if: ${{ matrix.module != 'examples'}}
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
 CUBLAS_WORKSPACE_CONFIG: :16:8
 run: |

@@ -154,7 +154,7 @@ jobs:
 - name: Run nightly example tests with Torch
 if: ${{ matrix.module == 'examples' }}
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
 CUBLAS_WORKSPACE_CONFIG: :16:8
 run: |

@@ -185,7 +185,7 @@ jobs:

 run_lora_nightly_tests:
 name: Nightly LoRA Tests with PEFT and TORCH
-runs-on: docker-gpu
+runs-on: [single-gpu, nvidia-gpu, t4, ci]
 container:
 image: diffusers/diffusers-pytorch-cuda
 options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0

@@ -211,7 +211,7 @@ jobs:

 - name: Run nightly LoRA tests with PEFT and Torch
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
 CUBLAS_WORKSPACE_CONFIG: :16:8
 run: |

@@ -269,7 +269,7 @@ jobs:

 - name: Run nightly Flax TPU tests
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 run: |
 python -m pytest -n 0 \
 -s -v -k "Flax" \

@@ -298,7 +298,7 @@ jobs:

 run_nightly_onnx_tests:
 name: Nightly ONNXRuntime CUDA tests on Ubuntu
-runs-on: docker-gpu
+runs-on: [single-gpu, nvidia-gpu, t4, ci]
 container:
 image: diffusers/diffusers-onnxruntime-cuda
 options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

@@ -324,7 +324,7 @@ jobs:

 - name: Run nightly ONNXRuntime CUDA tests
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 run: |
 python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
 -s -v -k "Onnx" \

@@ -390,7 +390,7 @@ jobs:
 shell: arch -arch arm64 bash {0}
 env:
 HF_HOME: /System/Volumes/Data/mnt/cache
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 run: |
 ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps \
 --report-log=tests_torch_mps.log \
.github/workflows/pr_dependency_test.yml (vendored, 1 changed line)

@@ -33,4 +33,3 @@ jobs:
 run: |
 python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
 pytest tests/others/test_dependencies.py
-
.github/workflows/pr_test_fetcher.yml (vendored, 6 changed lines)

@@ -15,7 +15,7 @@ concurrency:
 jobs:
 setup_pr_tests:
 name: Setup PR Tests
-runs-on: docker-cpu
+runs-on: [ self-hosted, intel-cpu, 8-cpu, ci ]
 container:
 image: diffusers/diffusers-pytorch-cpu
 options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

@@ -73,7 +73,7 @@ jobs:
 max-parallel: 2
 matrix:
 modules: ${{ fromJson(needs.setup_pr_tests.outputs.matrix) }}
-runs-on: docker-cpu
+runs-on: [ self-hosted, intel-cpu, 8-cpu, ci ]
 container:
 image: diffusers/diffusers-pytorch-cpu
 options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

@@ -123,7 +123,7 @@ jobs:
 config:
 - name: Hub tests for models, schedulers, and pipelines
 framework: hub_tests_pytorch
-runner: docker-cpu
+runner: [ self-hosted, intel-cpu, 8-cpu, ci ]
 image: diffusers/diffusers-pytorch-cpu
 report: torch_hub
.github/workflows/pr_test_peft_backend.yml (vendored, 18 changed lines)

@@ -111,3 +111,21 @@ jobs:
 -s -v \
 --make-reports=tests_${{ matrix.config.report }} \
 tests/lora/
+python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
+-s -v \
+--make-reports=tests_models_lora_${{ matrix.config.report }} \
+tests/models/ -k "lora"
+
+
+- name: Failure short reports
+if: ${{ failure() }}
+run: |
+cat reports/tests_${{ matrix.config.report }}_failures_short.txt
+cat reports/tests_models_lora_${{ matrix.config.report }}_failures_short.txt
+
+- name: Test suite reports artifacts
+if: ${{ always() }}
+uses: actions/upload-artifact@v2
+with:
+name: pr_${{ matrix.config.report }}_test_reports
+path: reports
.github/workflows/pr_tests.yml (vendored, 2 changed lines)

@@ -156,7 +156,7 @@ jobs:
 if: ${{ matrix.config.framework == 'pytorch_examples' }}
 run: |
 python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-python -m uv pip install peft
+python -m uv pip install peft timm
 python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
 --make-reports=tests_${{ matrix.config.report }} \
 examples
.github/workflows/push_tests.yml (vendored, 81 changed lines)

@@ -21,7 +21,9 @@ env:
 jobs:
 setup_torch_cuda_pipeline_matrix:
 name: Setup Torch Pipelines CUDA Slow Tests Matrix
-runs-on: ubuntu-latest
+runs-on: [ self-hosted, intel-cpu, 8-cpu, ci ]
+container:
+image: diffusers/diffusers-pytorch-cpu
 outputs:
 pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
 steps:

@@ -29,14 +31,13 @@ jobs:
 uses: actions/checkout@v3
 with:
 fetch-depth: 2
-- name: Set up Python
-uses: actions/setup-python@v4
-with:
-python-version: "3.8"
 - name: Install dependencies
 run: |
-pip install -e .
-pip install huggingface_hub
+python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+python -m uv pip install -e [quality,test]
+- name: Environment
+run: |
+python utils/print_env.py
 - name: Fetch Pipeline Matrix
 id: fetch_pipeline_matrix
 run: |

@@ -55,12 +56,13 @@ jobs:
 needs: setup_torch_cuda_pipeline_matrix
 strategy:
 fail-fast: false
+max-parallel: 8
 matrix:
 module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
 runs-on: [single-gpu, nvidia-gpu, t4, ci]
 container:
 image: diffusers/diffusers-pytorch-cuda
-options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0 --privileged
+options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0
 steps:
 - name: Checkout diffusers
 uses: actions/checkout@v3

@@ -69,12 +71,6 @@ jobs:
 - name: NVIDIA-SMI
 run: |
 nvidia-smi
-- name: Tailscale
-uses: huggingface/tailscale-action@v1
-with:
-authkey: ${{ secrets.TAILSCALE_SSH_AUTHKEY }}
-slackChannel: ${{ secrets.SLACK_CIFEEDBACK_CHANNEL }}
-slackToken: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
 - name: Install dependencies
 run: |
 python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"

@@ -85,7 +81,7 @@ jobs:
 python utils/print_env.py
 - name: Slow PyTorch CUDA checkpoint tests on Ubuntu
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
 CUBLAS_WORKSPACE_CONFIG: :16:8
 run: |

@@ -93,18 +89,11 @@ jobs:
 -s -v -k "not Flax and not Onnx" \
 --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
 tests/pipelines/${{ matrix.module }}
-- name: Tailscale Wait
-if: ${{ failure() || runner.debug == '1' }}
-uses: huggingface/tailscale-action@v1
-with:
-waitForSSH: true
-authkey: ${{ secrets.TAILSCALE_SSH_AUTHKEY }}
 - name: Failure short reports
 if: ${{ failure() }}
 run: |
 cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
 cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt

 - name: Test suite reports artifacts
 if: ${{ always() }}
 uses: actions/upload-artifact@v2

@@ -114,16 +103,16 @@ jobs:

 torch_cuda_tests:
 name: Torch CUDA Tests
-runs-on: docker-gpu
+runs-on: [single-gpu, nvidia-gpu, t4, ci]
 container:
 image: diffusers/diffusers-pytorch-cuda
-options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0
+options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0
 defaults:
 run:
 shell: bash
 strategy:
 matrix:
-module: [models, schedulers, lora, others]
+module: [models, schedulers, lora, others, single_file]
 steps:
 - name: Checkout diffusers
 uses: actions/checkout@v3

@@ -142,7 +131,7 @@ jobs:

 - name: Run slow PyTorch CUDA tests
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
 CUBLAS_WORKSPACE_CONFIG: :16:8
 run: |

@@ -166,10 +155,10 @@ jobs:

 peft_cuda_tests:
 name: PEFT CUDA Tests
-runs-on: docker-gpu
+runs-on: [single-gpu, nvidia-gpu, t4, ci]
 container:
 image: diffusers/diffusers-pytorch-cuda
-options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0
+options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0
 defaults:
 run:
 shell: bash

@@ -192,7 +181,7 @@ jobs:

 - name: Run slow PEFT CUDA tests
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
 CUBLAS_WORKSPACE_CONFIG: :16:8
 run: |

@@ -200,12 +189,17 @@ jobs:
 -s -v -k "not Flax and not Onnx and not PEFTLoRALoading" \
 --make-reports=tests_peft_cuda \
 tests/lora/
+python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+-s -v -k "lora and not Flax and not Onnx and not PEFTLoRALoading" \
+--make-reports=tests_peft_cuda_models_lora \
+tests/models/

 - name: Failure short reports
 if: ${{ failure() }}
 run: |
 cat reports/tests_peft_cuda_stats.txt
 cat reports/tests_peft_cuda_failures_short.txt
+cat reports/tests_peft_cuda_models_lora_failures_short.txt

 - name: Test suite reports artifacts
 if: ${{ always() }}

@@ -219,7 +213,7 @@ jobs:
 runs-on: docker-tpu
 container:
 image: diffusers/diffusers-flax-tpu
-options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --privileged
+options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --privileged
 defaults:
 run:
 shell: bash

@@ -241,7 +235,7 @@ jobs:

 - name: Run slow Flax TPU tests
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 run: |
 python -m pytest -n 0 \
 -s -v -k "Flax" \

@@ -263,10 +257,10 @@ jobs:

 onnx_cuda_tests:
 name: ONNX CUDA Tests
-runs-on: docker-gpu
+runs-on: [single-gpu, nvidia-gpu, t4, ci]
 container:
 image: diffusers/diffusers-onnxruntime-cuda
-options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0
+options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0
 defaults:
 run:
 shell: bash

@@ -288,7 +282,7 @@ jobs:

 - name: Run slow ONNXRuntime CUDA tests
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 run: |
 python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
 -s -v -k "Onnx" \

@@ -311,11 +305,11 @@ jobs:
 run_torch_compile_tests:
 name: PyTorch Compile CUDA tests

-runs-on: docker-gpu
+runs-on: [single-gpu, nvidia-gpu, t4, ci]

 container:
 image: diffusers/diffusers-pytorch-compile-cuda
-options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
+options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/

 steps:
 - name: Checkout diffusers

@@ -335,7 +329,7 @@ jobs:
 python utils/print_env.py
 - name: Run example tests on GPU
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 run: |
 python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
 - name: Failure short reports

@@ -352,11 +346,11 @@ jobs:
 run_xformers_tests:
 name: PyTorch xformers CUDA tests

-runs-on: docker-gpu
+runs-on: [single-gpu, nvidia-gpu, t4, ci]

 container:
 image: diffusers/diffusers-pytorch-xformers-cuda
-options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
+options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/

 steps:
 - name: Checkout diffusers

@@ -376,7 +370,7 @@ jobs:
 python utils/print_env.py
 - name: Run example tests on GPU
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 run: |
 python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
 - name: Failure short reports

@@ -393,11 +387,11 @@ jobs:
 run_examples_tests:
 name: Examples PyTorch CUDA tests on Ubuntu

-runs-on: docker-gpu
+runs-on: [single-gpu, nvidia-gpu, t4, ci]

 container:
 image: diffusers/diffusers-pytorch-cuda
-options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
+options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/

 steps:
 - name: Checkout diffusers

@@ -421,9 +415,10 @@ jobs:

 - name: Run example tests on GPU
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 run: |
 python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+python -m uv pip install timm
 python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/

 - name: Failure short reports
.github/workflows/push_tests_fast.yml (vendored, 2 changed lines)

@@ -107,7 +107,7 @@ jobs:
 if: ${{ matrix.config.framework == 'pytorch_examples' }}
 run: |
 python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-python -m uv pip install peft
+python -m uv pip install peft timm
 python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
 --make-reports=tests_${{ matrix.config.report }} \
 examples
.github/workflows/push_tests_mps.yml (vendored, 4 changed lines)

@@ -23,7 +23,7 @@ concurrency:
 jobs:
 run_fast_tests_apple_m1:
 name: Fast PyTorch MPS tests on MacOS
-runs-on: [ self-hosted, apple-m1 ]
+runs-on: macos-13-xlarge

 steps:
 - name: Checkout diffusers

@@ -59,7 +59,7 @@ jobs:
 shell: arch -arch arm64 bash {0}
 env:
 HF_HOME: /System/Volumes/Data/mnt/cache
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+HF_TOKEN: ${{ secrets.HF_TOKEN }}
 run: |
 ${CONDA_RUN} python -m pytest -n 0 -s -v --make-reports=tests_torch_mps tests/
.github/workflows/run_tests_from_a_pr.yml (vendored, new file, 73 lines)

@@ -0,0 +1,73 @@
+name: Check running SLOW tests from a PR (only GPU)
+
+on:
+workflow_dispatch:
+inputs:
+docker_image:
+default: 'diffusers/diffusers-pytorch-cuda'
+description: 'Name of the Docker image'
+required: true
+branch:
+description: 'PR Branch to test on'
+required: true
+test:
+description: 'Tests to run (e.g.: `tests/models`).'
+required: true
+
+env:
+DIFFUSERS_IS_CI: yes
+IS_GITHUB_CI: "1"
+HF_HOME: /mnt/cache
+OMP_NUM_THREADS: 8
+MKL_NUM_THREADS: 8
+PYTEST_TIMEOUT: 600
+RUN_SLOW: yes
+
+jobs:
+run_tests:
+name: "Run a test on our runner from a PR"
+runs-on: [single-gpu, nvidia-gpu, t4, ci]
+container:
+image: ${{ github.event.inputs.docker_image }}
+options: --gpus 0 --privileged --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+
+steps:
+- name: Validate test files input
+id: validate_test_files
+env:
+PY_TEST: ${{ github.event.inputs.test }}
+run: |
+if [[ ! "$PY_TEST" =~ ^tests/ ]]; then
+echo "Error: The input string must start with 'tests/'."
+exit 1
+fi
+
+if [[ ! "$PY_TEST" =~ ^tests/(models|pipelines) ]]; then
+echo "Error: The input string must contain either 'models' or 'pipelines' after 'tests/'."
+exit 1
+fi
+
+if [[ "$PY_TEST" == *";"* ]]; then
+echo "Error: The input string must not contain ';'."
+exit 1
+fi
+echo "$PY_TEST"
+
+- name: Checkout PR branch
+uses: actions/checkout@v4
+with:
+ref: ${{ github.event.inputs.branch }}
+repository: ${{ github.event.pull_request.head.repo.full_name }}
+
+
+- name: Install pytest
+run: |
+python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+python -m uv pip install -e [quality,test]
+python -m uv pip install peft
+
+- name: Run tests
+env:
+PY_TEST: ${{ github.event.inputs.test }}
+run: |
+pytest "$PY_TEST"
.github/workflows/ssh-runner.yml (vendored, new file, 46 lines)

@@ -0,0 +1,46 @@
+name: SSH into runners
+
+on:
+workflow_dispatch:
+inputs:
+runner_type:
+description: 'Type of runner to test (a10 or t4)'
+required: true
+docker_image:
+description: 'Name of the Docker image'
+required: true
+
+env:
+IS_GITHUB_CI: "1"
+HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
+HF_HOME: /mnt/cache
+DIFFUSERS_IS_CI: yes
+OMP_NUM_THREADS: 8
+MKL_NUM_THREADS: 8
+RUN_SLOW: yes
+
+jobs:
+ssh_runner:
+name: "SSH"
+runs-on: [single-gpu, nvidia-gpu, "${{ github.event.inputs.runner_type }}", ci]
+container:
+image: ${{ github.event.inputs.docker_image }}
+options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0 --privileged
+
+steps:
+- name: Checkout diffusers
+uses: actions/checkout@v3
+with:
+fetch-depth: 2
+
+- name: NVIDIA-SMI
+run: |
+nvidia-smi
+
+- name: Tailscale # In order to be able to SSH when a test fails
+uses: huggingface/tailscale-action@main
+with:
+authkey: ${{ secrets.TAILSCALE_SSH_AUTHKEY }}
+slackChannel: ${{ secrets.SLACK_CIFEEDBACK_CHANNEL }}
+slackToken: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
+waitForSSH: true
.github/workflows/trufflehog.yml (vendored, new file, 15 lines)

@@ -0,0 +1,15 @@
+on:
+push:
+
+name: Secret Leaks
+
+jobs:
+trufflehog:
+runs-on: ubuntu-latest
+steps:
+- name: Checkout code
+uses: actions/checkout@v4
+with:
+fetch-depth: 0
+- name: Secret Scanning
+uses: trufflesecurity/trufflehog@main
.github/workflows/update_metadata.yml (vendored, 2 changed lines)

@@ -25,6 +25,6 @@ jobs:

 - name: Update metadata
 env:
-HUGGING_FACE_HUB_TOKEN: ${{ secrets.SAYAK_HF_TOKEN }}
+HF_TOKEN: ${{ secrets.SAYAK_HF_TOKEN }}
 run: |
 python utils/update_metadata.py --commit_sha ${{ github.sha }}
@@ -245,7 +245,7 @@ The official training examples are maintained by the Diffusers' core maintainers
 This is because of the same reasons put forward in [6. Contribute a community pipeline](#6-contribute-a-community-pipeline) for official pipelines vs. community pipelines: It is not feasible for the core maintainers to maintain all possible training methods for diffusion models.
 If the Diffusers core maintainers and the community consider a certain training paradigm to be too experimental or not popular enough, the corresponding training code should be put in the `research_projects` folder and maintained by the author.

-Both official training and research examples consist of a directory that contains one or more training scripts, a requirements.txt file, and a README.md file. In order for the user to make use of the
+Both official training and research examples consist of a directory that contains one or more training scripts, a `requirements.txt` file, and a `README.md` file. In order for the user to make use of the
 training examples, it is required to clone the repository:

 ```bash

@@ -255,7 +255,8 @@ git clone https://github.com/huggingface/diffusers
 as well as to install all additional dependencies required for training:

 ```bash
-pip install -r /examples/<your-example-folder>/requirements.txt
+cd diffusers
+pip install -r examples/<your-example-folder>/requirements.txt
 ```

 Therefore when adding an example, the `requirements.txt` file shall define all pip dependencies required for your training example so that once all those are installed, the user can run the example's training script. See, for example, the [DreamBooth `requirements.txt` file](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/requirements.txt).

@@ -355,7 +356,7 @@ You will need basic `git` proficiency to be able to contribute to
 manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro
 Git](https://git-scm.com/book/en/v2) is a very good reference.

-Follow these steps to start contributing ([supported Python versions](https://github.com/huggingface/diffusers/blob/main/setup.py#L265)):
+Follow these steps to start contributing ([supported Python versions](https://github.com/huggingface/diffusers/blob/42f25d601a910dceadaee6c44345896b4cfa9928/setup.py#L270)):

 1. Fork the [repository](https://github.com/huggingface/diffusers) by
 clicking on the 'Fork' button on the repository's page. This creates a copy of the code
@@ -63,14 +63,14 @@ Let's walk through more detailed design decisions for each class.
 Pipelines are designed to be easy to use (therefore do not follow [*Simple over easy*](#simple-over-easy) 100%), are not feature complete, and should loosely be seen as examples of how to use [models](#models) and [schedulers](#schedulers) for inference.

 The following design principles are followed:
-- Pipelines follow the single-file policy. All pipelines can be found in individual directories under src/diffusers/pipelines. One pipeline folder corresponds to one diffusion paper/project/release. Multiple pipeline files can be gathered in one pipeline folder, as it’s done for [`src/diffusers/pipelines/stable-diffusion`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/stable_diffusion). If pipelines share similar functionality, one can make use of the [#Copied from mechanism](https://github.com/huggingface/diffusers/blob/125d783076e5bd9785beb05367a2d2566843a271/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L251).
+- Pipelines follow the single-file policy. All pipelines can be found in individual directories under src/diffusers/pipelines. One pipeline folder corresponds to one diffusion paper/project/release. Multiple pipeline files can be gathered in one pipeline folder, as it’s done for [`src/diffusers/pipelines/stable-diffusion`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/stable_diffusion). If pipelines share similar functionality, one can make use of the [# Copied from mechanism](https://github.com/huggingface/diffusers/blob/125d783076e5bd9785beb05367a2d2566843a271/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L251).
 - Pipelines all inherit from [`DiffusionPipeline`].
 - Every pipeline consists of different model and scheduler components, that are documented in the [`model_index.json` file](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/model_index.json), are accessible under the same name as attributes of the pipeline and can be shared between pipelines with [`DiffusionPipeline.components`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.components) function.
 - Every pipeline should be loadable via the [`DiffusionPipeline.from_pretrained`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained) function.
 - Pipelines should be used **only** for inference.
 - Pipelines should be very readable, self-explanatory, and easy to tweak.
 - Pipelines should be designed to build on top of each other and be easy to integrate into higher-level APIs.
-- Pipelines are **not** intended to be feature-complete user interfaces. For future complete user interfaces one should rather have a look at [InvokeAI](https://github.com/invoke-ai/InvokeAI), [Diffuzers](https://github.com/abhishekkrthakur/diffuzers), and [lama-cleaner](https://github.com/Sanster/lama-cleaner).
+- Pipelines are **not** intended to be feature-complete user interfaces. For feature-complete user interfaces one should rather have a look at [InvokeAI](https://github.com/invoke-ai/InvokeAI), [Diffuzers](https://github.com/abhishekkrthakur/diffuzers), and [lama-cleaner](https://github.com/Sanster/lama-cleaner).
 - Every pipeline should have one and only one way to run it via a `__call__` method. The naming of the `__call__` arguments should be shared across all pipelines.
 - Pipelines should be named after the task they are intended to solve.
 - In almost all cases, novel diffusion pipelines shall be implemented in a new pipeline folder/file.
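The pipeline principles in the hunk above lean on two public APIs, `DiffusionPipeline.from_pretrained` and `DiffusionPipeline.components`. A minimal sketch of how they are typically combined follows; the checkpoint ID and the two Stable Diffusion pipeline classes are illustrative choices, not taken from this diff.

```python
# Load one pipeline, then reuse its components in a second pipeline
# without downloading or allocating the shared weights twice.
import torch
from diffusers import StableDiffusionImg2ImgPipeline, StableDiffusionPipeline

text2img = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)

# `components` exposes the entries recorded in model_index.json
# (unet, vae, text_encoder, tokenizer, scheduler, ...), so they can be shared.
img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
```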
@@ -81,7 +81,7 @@ Models are designed as configurable toolboxes that are natural extensions of [Py

 The following design principles are followed:
 - Models correspond to **a type of model architecture**. *E.g.* the [`UNet2DConditionModel`] class is used for all UNet variations that expect 2D image inputs and are conditioned on some context.
-- All models can be found in [`src/diffusers/models`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models) and every model architecture shall be defined in its file, e.g. [`unet_2d_condition.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py), [`transformer_2d.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/transformer_2d.py), etc...
+- All models can be found in [`src/diffusers/models`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models) and every model architecture shall be defined in its file, e.g. [`unets/unet_2d_condition.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unets/unet_2d_condition.py), [`transformers/transformer_2d.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/transformers/transformer_2d.py), etc...
 - Models **do not** follow the single-file policy and should make use of smaller model building blocks, such as [`attention.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py), [`resnet.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/resnet.py), [`embeddings.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/embeddings.py), etc... **Note**: This is in stark contrast to Transformers' modeling files and shows that models do not really follow the single-file policy.
 - Models intend to expose complexity, just like PyTorch's `Module` class, and give clear error messages.
 - Models all inherit from `ModelMixin` and `ConfigMixin`.
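A concrete reading of the model principles above: `ModelMixin` supplies serialization (`from_pretrained`/`save_pretrained`), `ConfigMixin` exposes the architecture hyperparameters on `.config`, and the model itself stays a plain `torch.nn.Module`. The checkpoint id and tensor shapes in this sketch are illustrative assumptions for a Stable Diffusion v1-style UNet.

```python
import torch
from diffusers import UNet2DConditionModel

# ModelMixin: load a sub-model directly from a pipeline repository.
# The checkpoint id is an illustrative assumption.
unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet"
)

# ConfigMixin: the architecture hyperparameters live on `.config`.
print(unet.config.in_channels, unet.config.cross_attention_dim)

# The model is a regular torch.nn.Module; complexity is exposed, not hidden.
sample = torch.randn(1, unet.config.in_channels, 64, 64)
timestep = torch.tensor([10])
encoder_hidden_states = torch.randn(1, 77, unet.config.cross_attention_dim)
noise_pred = unet(sample, timestep, encoder_hidden_states).sample
print(noise_pred.shape)  # same shape as `sample`
```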
@@ -90,7 +90,7 @@ The following design principles are followed:
 - To integrate new model checkpoints whose general architecture can be classified as an architecture that already exists in Diffusers, the existing model architecture shall be adapted to make it work with the new checkpoint. One should only create a new file if the model architecture is fundamentally different.
 - Models should be designed to be easily extendable to future changes. This can be achieved by limiting public function arguments, configuration arguments, and "foreseeing" future changes, *e.g.* it is usually better to add `string` "...type" arguments that can easily be extended to new future types instead of boolean `is_..._type` arguments. Only the minimum amount of changes shall be made to existing architectures to make a new model checkpoint work.
 - The model design is a difficult trade-off between keeping code readable and concise and supporting many model checkpoints. For most parts of the modeling code, classes shall be adapted for new model checkpoints, while there are some exceptions where it is preferred to add new classes to make sure the code is kept concise and
-readable long-term, such as [UNet blocks](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py) and [Attention processors](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+readable long-term, such as [UNet blocks](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unets/unet_2d_blocks.py) and [Attention processors](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
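The "string `...type` arguments instead of boolean `is_..._type` arguments" guideline above is easiest to see in a toy block. The sketch below is a hypothetical illustration of the pattern, not code from the library: the string argument can grow to new variants without changing the constructor signature, while a boolean flag cannot.

```python
import torch.nn as nn

class ExampleBlock(nn.Module):
    """Hypothetical block illustrating the extensible `norm_type` pattern."""

    # Preferred: a string "...type" argument that new variants can extend.
    def __init__(self, dim: int, norm_type: str = "layer_norm"):
        super().__init__()
        if norm_type == "layer_norm":
            self.norm = nn.LayerNorm(dim)
        elif norm_type == "none":
            self.norm = nn.Identity()
        else:
            raise ValueError(f"Unknown norm_type: {norm_type!r}")

    def forward(self, x):
        return self.norm(x)

# Discouraged alternative: `use_layer_norm: bool = True` would force a new
# boolean (and a deprecation path) for every additional normalization variant.
```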
 ### Schedulers

@@ -100,7 +100,7 @@ The following design principles are followed:
 - All schedulers are found in [`src/diffusers/schedulers`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers).
 - Schedulers are **not** allowed to import from large utils files and shall be kept very self-contained.
 - One scheduler Python file corresponds to one scheduler algorithm (as might be defined in a paper).
-- If schedulers share similar functionalities, we can make use of the `#Copied from` mechanism.
+- If schedulers share similar functionalities, we can make use of the `# Copied from` mechanism.
 - Schedulers all inherit from `SchedulerMixin` and `ConfigMixin`.
 - Schedulers can be easily swapped out with the [`ConfigMixin.from_config`](https://huggingface.co/docs/diffusers/main/en/api/configuration#diffusers.ConfigMixin.from_config) method as explained in detail [here](./docs/source/en/using-diffusers/schedulers.md).
 - Every scheduler has to have a `set_num_inference_steps`, and a `step` function. `set_num_inference_steps(...)` has to be called before every denoising process, *i.e.* before `step(...)` is called.
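The scheduler contract above can be sketched in a few lines: swapping relies on `ConfigMixin.from_config`, and the denoising contract is "configure the number of steps, then call `step` repeatedly". In released Diffusers versions the step-count setter is exposed as `set_timesteps(...)`. The checkpoint id and the random tensors standing in for a real model are illustrative assumptions.

```python
import torch
from diffusers import DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler

# 1) Swap the scheduler of an existing pipeline without touching the model.
#    The checkpoint id is an illustrative assumption.
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)

# 2) The bare scheduler contract, with random tensors standing in for a model.
scheduler = DDPMScheduler()   # self-contained; no large utils imports
scheduler.set_timesteps(50)   # must be called before every denoising loop
sample = torch.randn(1, 3, 64, 64)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample
```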
README.md (24 changed lines)

@@ -20,21 +20,11 @@ limitations under the License.
     <br>
 <p>
 <p align="center">
-    <a href="https://github.com/huggingface/diffusers/blob/main/LICENSE">
-        <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/datasets.svg?color=blue">
-    </a>
-    <a href="https://github.com/huggingface/diffusers/releases">
-        <img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/diffusers.svg">
-    </a>
-    <a href="https://pepy.tech/project/diffusers">
-        <img alt="GitHub release" src="https://static.pepy.tech/badge/diffusers/month">
-    </a>
-    <a href="CODE_OF_CONDUCT.md">
-        <img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg">
-    </a>
-    <a href="https://twitter.com/diffuserslib">
-        <img alt="X account" src="https://img.shields.io/twitter/url/https/twitter.com/diffuserslib.svg?style=social&label=Follow%20%40diffuserslib">
-    </a>
+    <a href="https://github.com/huggingface/diffusers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/datasets.svg?color=blue"></a>
+    <a href="https://github.com/huggingface/diffusers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/diffusers.svg"></a>
+    <a href="https://pepy.tech/project/diffusers"><img alt="GitHub release" src="https://static.pepy.tech/badge/diffusers/month"></a>
+    <a href="CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg"></a>
+    <a href="https://twitter.com/diffuserslib"><img alt="X account" src="https://img.shields.io/twitter/url/https/twitter.com/diffuserslib.svg?style=social&label=Follow%20%40diffuserslib"></a>
 </p>

 🤗 Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. Whether you're looking for a simple inference solution or training your own diffusion models, 🤗 Diffusers is a modular toolbox that supports both. Our library is designed with a focus on [usability over performance](https://huggingface.co/docs/diffusers/conceptual/philosophy#usability-over-performance), [simple over easy](https://huggingface.co/docs/diffusers/conceptual/philosophy#simple-over-easy), and [customizability over abstractions](https://huggingface.co/docs/diffusers/conceptual/philosophy#tweakable-contributorfriendly-over-abstraction).
@@ -77,7 +67,7 @@ Please refer to the [How to use Stable Diffusion in Apple Silicon](https://huggi

 ## Quickstart

-Generating outputs is super easy with 🤗 Diffusers. To generate an image from text, use the `from_pretrained` method to load any pretrained diffusion model (browse the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) for 22000+ checkpoints):
+Generating outputs is super easy with 🤗 Diffusers. To generate an image from text, use the `from_pretrained` method to load any pretrained diffusion model (browse the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) for 27.000+ checkpoints):

 ```python
 from diffusers import DiffusionPipeline
@@ -219,7 +209,7 @@ Also, say 👋 in our public Discord channel <a href="https://discord.gg/G7tWnz9
 - https://github.com/deep-floyd/IF
 - https://github.com/bentoml/BentoML
 - https://github.com/bmaltais/kohya_ss
-- +9000 other amazing GitHub repositories 💪
+- +12.000 other amazing GitHub repositories 💪

 Thank you for using us ❤️.
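The quickstart hunk above only shows the opening lines of the README's text-to-image snippet; the rest of that code block is unchanged context the compare view omits. For orientation, a minimal sketch of how that kind of snippet typically continues is shown below. The checkpoint id and prompt are illustrative assumptions, not a quote of the README.

```python
import torch
from diffusers import DiffusionPipeline

# Illustrative continuation of the quickstart pattern referenced above;
# the checkpoint id and prompt are example choices, not taken from the diff.
pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipeline.to("cuda")
image = pipeline("An image of a squirrel in Picasso style").images[0]
image.save("squirrel.png")
```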
docker/diffusers-doc-builder/Dockerfile (new file, 52 lines)

@@ -0,0 +1,52 @@
FROM ubuntu:20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get -y update \
    && apt-get install -y software-properties-common \
    && add-apt-repository ppa:deadsnakes/ppa

RUN apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.10 \
    python3-pip \
    libgl1 \
    zip \
    wget \
    python3.10-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3.10 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
    python3.10 -m uv pip install --no-cache-dir \
    torch \
    torchvision \
    torchaudio \
    invisible_watermark \
    --extra-index-url https://download.pytorch.org/whl/cpu && \
    python3.10 -m uv pip install --no-cache-dir \
    accelerate \
    datasets \
    hf-doc-builder \
    huggingface-hub \
    Jinja2 \
    librosa \
    numpy==1.26.4 \
    scipy \
    tensorboard \
    transformers \
    matplotlib \
    setuptools==69.5.1

CMD ["/bin/bash"]
@@ -4,22 +4,25 @@ LABEL repository="diffusers"

 ENV DEBIAN_FRONTEND=noninteractive

-RUN apt update && \
-    apt install -y bash \
+RUN apt-get -y update \
+    && apt-get install -y software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa
+
+RUN apt install -y bash \
     build-essential \
     git \
     git-lfs \
     curl \
     ca-certificates \
     libsndfile1-dev \
     libgl1 \
-    python3.8 \
+    python3.10 \
     python3-pip \
-    python3.8-venv && \
+    python3.10-venv && \
     rm -rf /var/lib/apt/lists

 # make sure to use venv
-RUN python3 -m venv /opt/venv
+RUN python3.10 -m venv /opt/venv
 ENV PATH="/opt/venv/bin:$PATH"

 # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
@@ -37,7 +40,7 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
     huggingface-hub \
     Jinja2 \
     librosa \
-    numpy \
+    numpy==1.26.4 \
     scipy \
     tensorboard \
     transformers
@@ -4,8 +4,11 @@ LABEL repository="diffusers"

 ENV DEBIAN_FRONTEND=noninteractive

-RUN apt update && \
-    apt install -y bash \
+RUN apt-get -y update \
+    && apt-get install -y software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa
+
+RUN apt install -y bash \
     build-essential \
     git \
     git-lfs \
@@ -13,13 +16,13 @@ RUN apt update && \
     ca-certificates \
     libsndfile1-dev \
     libgl1 \
-    python3.8 \
+    python3.10 \
     python3-pip \
-    python3.8-venv && \
+    python3.10-venv && \
     rm -rf /var/lib/apt/lists

 # make sure to use venv
-RUN python3 -m venv /opt/venv
+RUN python3.10 -m venv /opt/venv
 ENV PATH="/opt/venv/bin:$PATH"

 # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
@@ -39,7 +42,7 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
     huggingface-hub \
     Jinja2 \
     librosa \
-    numpy \
+    numpy==1.26.4 \
     scipy \
     tensorboard \
     transformers
@@ -4,8 +4,11 @@ LABEL repository="diffusers"

 ENV DEBIAN_FRONTEND=noninteractive

-RUN apt update && \
-    apt install -y bash \
+RUN apt-get -y update \
+    && apt-get install -y software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa
+
+RUN apt install -y bash \
     build-essential \
     git \
     git-lfs \
@@ -13,13 +16,13 @@ RUN apt update && \
     ca-certificates \
     libsndfile1-dev \
     libgl1 \
-    python3.8 \
+    python3.10 \
     python3-pip \
-    python3.8-venv && \
+    python3.10-venv && \
     rm -rf /var/lib/apt/lists

 # make sure to use venv
-RUN python3 -m venv /opt/venv
+RUN python3.10 -m venv /opt/venv
 ENV PATH="/opt/venv/bin:$PATH"

 # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
@@ -37,7 +40,7 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
     huggingface-hub \
     Jinja2 \
     librosa \
-    numpy \
+    numpy==1.26.4 \
     scipy \
     tensorboard \
     transformers
@@ -4,8 +4,11 @@ LABEL repository="diffusers"

 ENV DEBIAN_FRONTEND=noninteractive

-RUN apt update && \
-    apt install -y bash \
+RUN apt-get -y update \
+    && apt-get install -y software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa
+
+RUN apt install -y bash \
     build-essential \
     git \
     git-lfs \
@@ -13,31 +16,31 @@ RUN apt update && \
     ca-certificates \
     libsndfile1-dev \
     libgl1 \
-    python3.8 \
+    python3.10 \
     python3-pip \
-    python3.8-venv && \
+    python3.10-venv && \
     rm -rf /var/lib/apt/lists

 # make sure to use venv
-RUN python3 -m venv /opt/venv
+RUN python3.10 -m venv /opt/venv
 ENV PATH="/opt/venv/bin:$PATH"

 # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
-RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
-    python3 -m uv pip install --no-cache-dir \
+RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
+    python3.10 -m uv pip install --no-cache-dir \
     torch \
     torchvision \
     torchaudio \
     "onnxruntime-gpu>=1.13.1" \
     --extra-index-url https://download.pytorch.org/whl/cu117 && \
-    python3 -m uv pip install --no-cache-dir \
+    python3.10 -m uv pip install --no-cache-dir \
     accelerate \
     datasets \
     hf-doc-builder \
     huggingface-hub \
     Jinja2 \
     librosa \
-    numpy \
+    numpy==1.26.4 \
     scipy \
     tensorboard \
     transformers
@@ -4,8 +4,11 @@ LABEL repository="diffusers"

 ENV DEBIAN_FRONTEND=noninteractive

-RUN apt update && \
-    apt install -y bash \
+RUN apt-get -y update \
+    && apt-get install -y software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa
+
+RUN apt install -y bash \
     build-essential \
     git \
     git-lfs \
@@ -13,31 +16,30 @@ RUN apt update && \
     ca-certificates \
     libsndfile1-dev \
     libgl1 \
-    python3.9 \
-    python3.9-dev \
+    python3.10 \
     python3-pip \
-    python3.9-venv && \
+    python3.10-venv && \
     rm -rf /var/lib/apt/lists

 # make sure to use venv
-RUN python3.9 -m venv /opt/venv
+RUN python3.10 -m venv /opt/venv
 ENV PATH="/opt/venv/bin:$PATH"

 # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
-RUN python3.9 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
-    python3.9 -m uv pip install --no-cache-dir \
+RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
+    python3.10 -m uv pip install --no-cache-dir \
     torch \
     torchvision \
     torchaudio \
     invisible_watermark && \
-    python3.9 -m pip install --no-cache-dir \
+    python3.10 -m pip install --no-cache-dir \
     accelerate \
     datasets \
     hf-doc-builder \
     huggingface-hub \
     Jinja2 \
     librosa \
-    numpy \
+    numpy==1.26.4 \
     scipy \
     tensorboard \
     transformers
@@ -4,40 +4,43 @@ LABEL repository="diffusers"

 ENV DEBIAN_FRONTEND=noninteractive

-RUN apt update && \
-    apt install -y bash \
+RUN apt-get -y update \
+    && apt-get install -y software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa
+
+RUN apt install -y bash \
     build-essential \
     git \
     git-lfs \
     curl \
     ca-certificates \
     libsndfile1-dev \
-    python3.8 \
+    python3.10 \
     python3-pip \
     libgl1 \
-    python3.8-venv && \
+    python3.10-venv && \
     rm -rf /var/lib/apt/lists

 # make sure to use venv
-RUN python3 -m venv /opt/venv
+RUN python3.10 -m venv /opt/venv
 ENV PATH="/opt/venv/bin:$PATH"

 # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
-RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
-    python3 -m uv pip install --no-cache-dir \
+RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
+    python3.10 -m uv pip install --no-cache-dir \
     torch \
     torchvision \
     torchaudio \
     invisible_watermark \
     --extra-index-url https://download.pytorch.org/whl/cpu && \
-    python3 -m uv pip install --no-cache-dir \
+    python3.10 -m uv pip install --no-cache-dir \
     accelerate \
     datasets \
     hf-doc-builder \
     huggingface-hub \
     Jinja2 \
     librosa \
-    numpy \
+    numpy==1.26.4 \
     scipy \
     tensorboard \
     transformers matplotlib
@@ -4,8 +4,11 @@ LABEL repository="diffusers"

 ENV DEBIAN_FRONTEND=noninteractive

-RUN apt update && \
-    apt install -y bash \
+RUN apt-get -y update \
+    && apt-get install -y software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa
+
+RUN apt install -y bash \
     build-essential \
     git \
     git-lfs \
@@ -13,30 +16,30 @@ RUN apt update && \
     ca-certificates \
     libsndfile1-dev \
     libgl1 \
-    python3.8 \
+    python3.10 \
     python3-pip \
-    python3.8-venv && \
+    python3.10-venv && \
     rm -rf /var/lib/apt/lists

 # make sure to use venv
-RUN python3 -m venv /opt/venv
+RUN python3.10 -m venv /opt/venv
 ENV PATH="/opt/venv/bin:$PATH"

 # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
-RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
-    python3 -m uv pip install --no-cache-dir \
+RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
+    python3.10 -m uv pip install --no-cache-dir \
     torch \
     torchvision \
     torchaudio \
     invisible_watermark && \
-    python3 -m pip install --no-cache-dir \
+    python3.10 -m pip install --no-cache-dir \
     accelerate \
     datasets \
     hf-doc-builder \
     huggingface-hub \
     Jinja2 \
     librosa \
-    numpy \
+    numpy==1.26.4 \
     scipy \
     tensorboard \
     transformers \
@@ -4,8 +4,11 @@ LABEL repository="diffusers"

 ENV DEBIAN_FRONTEND=noninteractive

-RUN apt update && \
-    apt install -y bash \
+RUN apt-get -y update \
+    && apt-get install -y software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa
+
+RUN apt install -y bash \
     build-essential \
     git \
     git-lfs \
@@ -13,30 +16,30 @@ RUN apt update && \
     ca-certificates \
     libsndfile1-dev \
     libgl1 \
-    python3.8 \
+    python3.10 \
     python3-pip \
-    python3.8-venv && \
+    python3.10-venv && \
     rm -rf /var/lib/apt/lists

 # make sure to use venv
-RUN python3 -m venv /opt/venv
+RUN python3.10 -m venv /opt/venv
 ENV PATH="/opt/venv/bin:$PATH"

 # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
-RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
-    python3 -m pip install --no-cache-dir \
+RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
+    python3.10 -m pip install --no-cache-dir \
     torch \
     torchvision \
     torchaudio \
     invisible_watermark && \
-    python3 -m uv pip install --no-cache-dir \
+    python3.10 -m uv pip install --no-cache-dir \
     accelerate \
     datasets \
     hf-doc-builder \
     huggingface-hub \
     Jinja2 \
     librosa \
-    numpy \
+    numpy==1.26.4 \
     scipy \
     tensorboard \
     transformers \
@@ -242,10 +242,10 @@ Here's an example of a tuple return, comprising several objects:

 ```
 Returns:
-`tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
-- ** loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
+`tuple(torch.Tensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
+- ** loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.Tensor` of shape `(1,)` --
 Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
-- **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
+- **prediction_scores** (`torch.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
 ```
@@ -21,158 +21,146 @@
|
|||||||
title: Load LoRAs for inference
|
title: Load LoRAs for inference
|
||||||
- local: tutorials/fast_diffusion
|
- local: tutorials/fast_diffusion
|
||||||
title: Accelerate inference of text-to-image diffusion models
|
title: Accelerate inference of text-to-image diffusion models
|
||||||
|
- local: tutorials/inference_with_big_models
|
||||||
|
title: Working with big models
|
||||||
title: Tutorials
|
title: Tutorials
|
||||||
- sections:
|
- sections:
|
||||||
- sections:
|
- local: using-diffusers/loading
|
||||||
- local: using-diffusers/loading
|
title: Load pipelines
|
||||||
title: Load pipelines
|
- local: using-diffusers/custom_pipeline_overview
|
||||||
- local: using-diffusers/custom_pipeline_overview
|
title: Load community pipelines and components
|
||||||
title: Load community pipelines and components
|
- local: using-diffusers/schedulers
|
||||||
- local: using-diffusers/schedulers
|
title: Load schedulers and models
|
||||||
title: Load schedulers and models
|
- local: using-diffusers/other-formats
|
||||||
- local: using-diffusers/using_safetensors
|
title: Model files and layouts
|
||||||
title: Load safetensors
|
- local: using-diffusers/loading_adapters
|
||||||
- local: using-diffusers/other-formats
|
title: Load adapters
|
||||||
title: Load different Stable Diffusion formats
|
- local: using-diffusers/push_to_hub
|
||||||
- local: using-diffusers/loading_adapters
|
title: Push files to the Hub
|
||||||
title: Load adapters
|
title: Load pipelines and adapters
|
||||||
- local: using-diffusers/push_to_hub
|
|
||||||
title: Push files to the Hub
|
|
||||||
title: Loading & Hub
|
|
||||||
- sections:
|
|
||||||
- local: using-diffusers/pipeline_overview
|
|
||||||
title: Overview
|
|
||||||
- local: using-diffusers/unconditional_image_generation
|
|
||||||
title: Unconditional image generation
|
|
||||||
- local: using-diffusers/conditional_image_generation
|
|
||||||
title: Text-to-image
|
|
||||||
- local: using-diffusers/img2img
|
|
||||||
title: Image-to-image
|
|
||||||
- local: using-diffusers/inpaint
|
|
||||||
title: Inpainting
|
|
||||||
- local: using-diffusers/text-img2vid
|
|
||||||
title: Text or image-to-video
|
|
||||||
- local: using-diffusers/depth2img
|
|
||||||
title: Depth-to-image
|
|
||||||
title: Tasks
|
|
||||||
- sections:
|
|
||||||
- local: using-diffusers/textual_inversion_inference
|
|
||||||
title: Textual inversion
|
|
||||||
- local: using-diffusers/ip_adapter
|
|
||||||
title: IP-Adapter
|
|
||||||
- local: using-diffusers/merge_loras
|
|
||||||
title: Merge LoRAs
|
|
||||||
- local: training/distributed_inference
|
|
||||||
title: Distributed inference with multiple GPUs
|
|
||||||
- local: using-diffusers/reusing_seeds
|
|
||||||
title: Improve image quality with deterministic generation
|
|
||||||
- local: using-diffusers/control_brightness
|
|
||||||
title: Control image brightness
|
|
||||||
- local: using-diffusers/weighted_prompts
|
|
||||||
title: Prompt techniques
|
|
||||||
- local: using-diffusers/freeu
|
|
||||||
title: Improve generation quality with FreeU
|
|
||||||
title: Techniques
|
|
||||||
- sections:
|
|
||||||
- local: using-diffusers/pipeline_overview
|
|
||||||
title: Overview
|
|
||||||
- local: using-diffusers/sdxl
|
|
||||||
title: Stable Diffusion XL
|
|
||||||
- local: using-diffusers/sdxl_turbo
|
|
||||||
title: SDXL Turbo
|
|
||||||
- local: using-diffusers/kandinsky
|
|
||||||
title: Kandinsky
|
|
||||||
- local: using-diffusers/controlnet
|
|
||||||
title: ControlNet
|
|
||||||
- local: using-diffusers/t2i_adapter
|
|
||||||
title: T2I-Adapter
|
|
||||||
- local: using-diffusers/shap-e
|
|
||||||
title: Shap-E
|
|
||||||
- local: using-diffusers/diffedit
|
|
||||||
title: DiffEdit
|
|
||||||
- local: using-diffusers/distilled_sd
|
|
||||||
title: Distilled Stable Diffusion inference
|
|
||||||
- local: using-diffusers/callback
|
|
||||||
title: Pipeline callbacks
|
|
||||||
- local: using-diffusers/reproducibility
|
|
||||||
title: Create reproducible pipelines
|
|
||||||
- local: using-diffusers/custom_pipeline_examples
|
|
||||||
title: Community pipelines
|
|
||||||
- local: using-diffusers/contribute_pipeline
|
|
||||||
title: Contribute a community pipeline
|
|
||||||
- local: using-diffusers/inference_with_lcm_lora
|
|
||||||
title: Latent Consistency Model-LoRA
|
|
||||||
- local: using-diffusers/inference_with_lcm
|
|
||||||
title: Latent Consistency Model
|
|
||||||
- local: using-diffusers/inference_with_tcd_lora
|
|
||||||
title: Trajectory Consistency Distillation-LoRA
|
|
||||||
- local: using-diffusers/svd
|
|
||||||
title: Stable Video Diffusion
|
|
||||||
title: Specific pipeline examples
|
|
||||||
- sections:
|
|
||||||
- local: training/overview
|
|
||||||
title: Overview
|
|
||||||
- local: training/create_dataset
|
|
||||||
title: Create a dataset for training
|
|
||||||
- local: training/adapt_a_model
|
|
||||||
title: Adapt a model to a new task
|
|
||||||
- sections:
|
|
||||||
- local: training/unconditional_training
|
|
||||||
title: Unconditional image generation
|
|
||||||
- local: training/text2image
|
|
||||||
title: Text-to-image
|
|
||||||
- local: training/sdxl
|
|
||||||
title: Stable Diffusion XL
|
|
||||||
- local: training/kandinsky
|
|
||||||
title: Kandinsky 2.2
|
|
||||||
- local: training/wuerstchen
|
|
||||||
title: Wuerstchen
|
|
||||||
- local: training/controlnet
|
|
||||||
title: ControlNet
|
|
||||||
- local: training/t2i_adapters
|
|
||||||
title: T2I-Adapters
|
|
||||||
- local: training/instructpix2pix
|
|
||||||
title: InstructPix2Pix
|
|
||||||
title: Models
|
|
||||||
- sections:
|
|
||||||
- local: training/text_inversion
|
|
||||||
title: Textual Inversion
|
|
||||||
- local: training/dreambooth
|
|
||||||
title: DreamBooth
|
|
||||||
- local: training/lora
|
|
||||||
title: LoRA
|
|
||||||
- local: training/custom_diffusion
|
|
||||||
title: Custom Diffusion
|
|
||||||
- local: training/lcm_distill
|
|
||||||
title: Latent Consistency Distillation
|
|
||||||
- local: training/ddpo
|
|
||||||
title: Reinforcement learning training with DDPO
|
|
||||||
title: Methods
|
|
||||||
title: Training
|
|
||||||
- sections:
|
|
||||||
- local: using-diffusers/other-modalities
|
|
||||||
title: Other Modalities
|
|
||||||
title: Taking Diffusers Beyond Images
|
|
||||||
title: Using Diffusers
|
|
||||||
- sections:
|
- sections:
|
||||||
- local: optimization/opt_overview
|
- local: using-diffusers/unconditional_image_generation
|
||||||
|
title: Unconditional image generation
|
||||||
|
- local: using-diffusers/conditional_image_generation
|
||||||
|
title: Text-to-image
|
||||||
|
- local: using-diffusers/img2img
|
||||||
|
title: Image-to-image
|
||||||
|
- local: using-diffusers/inpaint
|
||||||
|
title: Inpainting
|
||||||
|
- local: using-diffusers/text-img2vid
|
||||||
|
title: Text or image-to-video
|
||||||
|
- local: using-diffusers/depth2img
|
||||||
|
title: Depth-to-image
|
||||||
|
title: Generative tasks
|
||||||
|
- sections:
|
||||||
|
- local: using-diffusers/overview_techniques
|
||||||
title: Overview
|
title: Overview
|
||||||
- sections:
|
- local: training/distributed_inference
|
||||||
- local: optimization/fp16
|
title: Distributed inference with multiple GPUs
|
||||||
title: Speed up inference
|
- local: using-diffusers/merge_loras
|
||||||
- local: optimization/memory
|
title: Merge LoRAs
|
||||||
title: Reduce memory usage
|
- local: using-diffusers/scheduler_features
|
||||||
- local: optimization/torch2.0
|
title: Scheduler features
|
||||||
title: PyTorch 2.0
|
- local: using-diffusers/callback
|
||||||
- local: optimization/xformers
|
title: Pipeline callbacks
|
||||||
title: xFormers
|
- local: using-diffusers/reusing_seeds
|
||||||
- local: optimization/tome
|
title: Reproducible pipelines
|
||||||
title: Token merging
|
- local: using-diffusers/image_quality
|
||||||
- local: optimization/deepcache
|
title: Controlling image quality
|
||||||
title: DeepCache
|
- local: using-diffusers/weighted_prompts
|
||||||
- local: optimization/tgate
|
title: Prompt techniques
|
||||||
title: TGATE
|
title: Inference techniques
|
||||||
title: General optimizations
|
- sections:
|
||||||
|
- local: advanced_inference/outpaint
|
||||||
|
title: Outpainting
|
||||||
|
title: Advanced inference
|
||||||
|
- sections:
|
||||||
|
- local: using-diffusers/sdxl
|
||||||
|
title: Stable Diffusion XL
|
||||||
|
- local: using-diffusers/sdxl_turbo
|
||||||
|
title: SDXL Turbo
|
||||||
|
- local: using-diffusers/kandinsky
|
||||||
|
title: Kandinsky
|
||||||
|
- local: using-diffusers/ip_adapter
|
||||||
|
title: IP-Adapter
|
||||||
|
- local: using-diffusers/pag
|
||||||
|
title: PAG
|
||||||
|
- local: using-diffusers/controlnet
|
||||||
|
title: ControlNet
|
||||||
|
- local: using-diffusers/t2i_adapter
|
||||||
|
title: T2I-Adapter
|
||||||
|
- local: using-diffusers/inference_with_lcm
|
||||||
|
title: Latent Consistency Model
|
||||||
|
- local: using-diffusers/textual_inversion_inference
|
||||||
|
title: Textual inversion
|
||||||
|
- local: using-diffusers/shap-e
|
||||||
|
title: Shap-E
|
||||||
|
- local: using-diffusers/diffedit
|
||||||
|
title: DiffEdit
|
||||||
|
- local: using-diffusers/inference_with_tcd_lora
|
||||||
|
title: Trajectory Consistency Distillation-LoRA
|
||||||
|
- local: using-diffusers/svd
|
||||||
|
title: Stable Video Diffusion
|
||||||
|
- local: using-diffusers/marigold_usage
|
||||||
|
title: Marigold Computer Vision
|
||||||
|
title: Specific pipeline examples
|
||||||
|
- sections:
|
||||||
|
- local: training/overview
|
||||||
|
title: Overview
|
||||||
|
- local: training/create_dataset
|
||||||
|
title: Create a dataset for training
|
||||||
|
- local: training/adapt_a_model
|
||||||
|
title: Adapt a model to a new task
|
||||||
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
|
- local: training/unconditional_training
|
||||||
|
title: Unconditional image generation
|
||||||
|
- local: training/text2image
|
||||||
|
title: Text-to-image
|
||||||
|
- local: training/sdxl
|
||||||
|
title: Stable Diffusion XL
|
||||||
|
- local: training/kandinsky
|
||||||
|
title: Kandinsky 2.2
|
||||||
|
- local: training/wuerstchen
|
||||||
|
title: Wuerstchen
|
||||||
|
- local: training/controlnet
|
||||||
|
title: ControlNet
|
||||||
|
- local: training/t2i_adapters
|
||||||
|
title: T2I-Adapters
|
||||||
|
- local: training/instructpix2pix
|
||||||
|
title: InstructPix2Pix
|
||||||
|
title: Models
|
||||||
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
|
- local: training/text_inversion
|
||||||
|
title: Textual Inversion
|
||||||
|
- local: training/dreambooth
|
||||||
|
title: DreamBooth
|
||||||
|
- local: training/lora
|
||||||
|
title: LoRA
|
||||||
|
- local: training/custom_diffusion
|
||||||
|
title: Custom Diffusion
|
||||||
|
- local: training/lcm_distill
|
||||||
|
title: Latent Consistency Distillation
|
||||||
|
- local: training/ddpo
|
||||||
|
title: Reinforcement learning training with DDPO
|
||||||
|
title: Methods
|
||||||
|
title: Training
|
||||||
|
- sections:
|
||||||
|
- local: optimization/fp16
|
||||||
|
title: Speed up inference
|
||||||
|
- local: optimization/memory
|
||||||
|
title: Reduce memory usage
|
||||||
|
- local: optimization/torch2.0
|
||||||
|
title: PyTorch 2.0
|
||||||
|
- local: optimization/xformers
|
||||||
|
title: xFormers
|
||||||
|
- local: optimization/tome
|
||||||
|
title: Token merging
|
||||||
|
- local: optimization/deepcache
|
||||||
|
title: DeepCache
|
||||||
|
- local: optimization/tgate
|
||||||
|
title: TGATE
|
||||||
- sections:
|
- sections:
|
||||||
- local: using-diffusers/stable_diffusion_jax_how_to
|
- local: using-diffusers/stable_diffusion_jax_how_to
|
||||||
title: JAX/Flax
|
title: JAX/Flax
|
||||||
@@ -182,14 +170,14 @@
|
|||||||
title: OpenVINO
|
title: OpenVINO
|
||||||
- local: optimization/coreml
|
- local: optimization/coreml
|
||||||
title: Core ML
|
title: Core ML
|
||||||
title: Optimized model types
|
title: Optimized model formats
|
||||||
- sections:
|
- sections:
|
||||||
- local: optimization/mps
|
- local: optimization/mps
|
||||||
title: Metal Performance Shaders (MPS)
|
title: Metal Performance Shaders (MPS)
|
||||||
- local: optimization/habana
|
- local: optimization/habana
|
||||||
title: Habana Gaudi
|
title: Habana Gaudi
|
||||||
title: Optimized hardware
|
title: Optimized hardware
|
||||||
title: Optimization
|
title: Accelerate inference and reduce memory
|
||||||
- sections:
|
- sections:
|
||||||
- local: conceptual/philosophy
|
- local: conceptual/philosophy
|
||||||
title: Philosophy
|
title: Philosophy
|
||||||
@@ -203,7 +191,8 @@
|
|||||||
title: Evaluating Diffusion Models
|
title: Evaluating Diffusion Models
|
||||||
title: Conceptual Guides
|
title: Conceptual Guides
|
||||||
- sections:
|
- sections:
|
||||||
- sections:
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
- local: api/configuration
|
- local: api/configuration
|
||||||
title: Configuration
|
title: Configuration
|
||||||
- local: api/logging
|
- local: api/logging
|
||||||
@@ -211,7 +200,8 @@
|
|||||||
- local: api/outputs
|
- local: api/outputs
|
||||||
title: Outputs
|
title: Outputs
|
||||||
title: Main Classes
|
title: Main Classes
|
||||||
- sections:
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
- local: api/loaders/ip_adapter
|
- local: api/loaders/ip_adapter
|
||||||
title: IP-Adapter
|
title: IP-Adapter
|
||||||
- local: api/loaders/lora
|
- local: api/loaders/lora
|
||||||
@@ -225,7 +215,8 @@
|
|||||||
- local: api/loaders/peft
|
- local: api/loaders/peft
|
||||||
title: PEFT
|
title: PEFT
|
||||||
title: Loaders
|
title: Loaders
|
||||||
- sections:
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
- local: api/models/overview
|
- local: api/models/overview
|
||||||
title: Overview
|
title: Overview
|
||||||
- local: api/models/unet
|
- local: api/models/unet
|
||||||
@@ -251,15 +242,26 @@
|
|||||||
- local: api/models/consistency_decoder_vae
|
- local: api/models/consistency_decoder_vae
|
||||||
title: ConsistencyDecoderVAE
|
title: ConsistencyDecoderVAE
|
||||||
- local: api/models/transformer2d
|
- local: api/models/transformer2d
|
||||||
title: Transformer2D
|
title: Transformer2DModel
|
||||||
|
- local: api/models/pixart_transformer2d
|
||||||
|
title: PixArtTransformer2DModel
|
||||||
|
- local: api/models/dit_transformer2d
|
||||||
|
title: DiTTransformer2DModel
|
||||||
|
- local: api/models/hunyuan_transformer2d
|
||||||
|
title: HunyuanDiT2DModel
|
||||||
- local: api/models/transformer_temporal
|
- local: api/models/transformer_temporal
|
||||||
title: Transformer Temporal
|
title: TransformerTemporalModel
|
||||||
|
- local: api/models/sd3_transformer2d
|
||||||
|
title: SD3Transformer2DModel
|
||||||
- local: api/models/prior_transformer
|
- local: api/models/prior_transformer
|
||||||
title: Prior Transformer
|
title: PriorTransformer
|
||||||
- local: api/models/controlnet
|
- local: api/models/controlnet
|
||||||
title: ControlNet
|
title: ControlNetModel
|
||||||
|
- local: api/models/controlnet_sd3
|
||||||
|
title: SD3ControlNetModel
|
||||||
title: Models
|
title: Models
|
||||||
- sections:
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
- local: api/pipelines/overview
|
- local: api/pipelines/overview
|
||||||
title: Overview
|
title: Overview
|
||||||
- local: api/pipelines/amused
|
- local: api/pipelines/amused
|
||||||
@@ -280,6 +282,8 @@
|
|||||||
title: Consistency Models
|
title: Consistency Models
|
||||||
- local: api/pipelines/controlnet
|
- local: api/pipelines/controlnet
|
||||||
title: ControlNet
|
title: ControlNet
|
||||||
|
- local: api/pipelines/controlnet_sd3
|
||||||
|
title: ControlNet with Stable Diffusion 3
|
||||||
- local: api/pipelines/controlnet_sdxl
|
- local: api/pipelines/controlnet_sdxl
|
||||||
title: ControlNet with Stable Diffusion XL
|
title: ControlNet with Stable Diffusion XL
|
||||||
- local: api/pipelines/controlnetxs
|
- local: api/pipelines/controlnetxs
|
||||||
@@ -298,6 +302,8 @@
|
|||||||
title: DiffEdit
|
title: DiffEdit
|
||||||
- local: api/pipelines/dit
|
- local: api/pipelines/dit
|
||||||
title: DiT
|
title: DiT
|
||||||
|
- local: api/pipelines/hunyuandit
|
||||||
|
title: Hunyuan-DiT
|
||||||
- local: api/pipelines/i2vgenxl
|
- local: api/pipelines/i2vgenxl
|
||||||
title: I2VGen-XL
|
title: I2VGen-XL
|
||||||
- local: api/pipelines/pix2pix
|
- local: api/pipelines/pix2pix
|
||||||
@@ -314,16 +320,22 @@
|
|||||||
title: Latent Diffusion
|
title: Latent Diffusion
|
||||||
- local: api/pipelines/ledits_pp
|
- local: api/pipelines/ledits_pp
|
||||||
title: LEDITS++
|
title: LEDITS++
|
||||||
|
- local: api/pipelines/marigold
|
||||||
|
title: Marigold
|
||||||
- local: api/pipelines/panorama
|
- local: api/pipelines/panorama
|
||||||
title: MultiDiffusion
|
title: MultiDiffusion
|
||||||
- local: api/pipelines/musicldm
|
- local: api/pipelines/musicldm
|
||||||
title: MusicLDM
|
title: MusicLDM
|
||||||
|
- local: api/pipelines/pag
|
||||||
|
title: PAG
|
||||||
- local: api/pipelines/paint_by_example
|
- local: api/pipelines/paint_by_example
|
||||||
title: Paint by Example
|
title: Paint by Example
|
||||||
- local: api/pipelines/pia
|
- local: api/pipelines/pia
|
||||||
title: Personalized Image Animator (PIA)
|
title: Personalized Image Animator (PIA)
|
||||||
- local: api/pipelines/pixart
|
- local: api/pipelines/pixart
|
||||||
title: PixArt-α
|
title: PixArt-α
|
||||||
|
- local: api/pipelines/pixart_sigma
|
||||||
|
title: PixArt-Σ
|
||||||
- local: api/pipelines/self_attention_guidance
|
- local: api/pipelines/self_attention_guidance
|
||||||
title: Self-Attention Guidance
|
title: Self-Attention Guidance
|
||||||
- local: api/pipelines/semantic_stable_diffusion
|
- local: api/pipelines/semantic_stable_diffusion
|
||||||
@@ -351,6 +363,8 @@
|
|||||||
title: Safe Stable Diffusion
|
title: Safe Stable Diffusion
|
||||||
- local: api/pipelines/stable_diffusion/stable_diffusion_2
|
- local: api/pipelines/stable_diffusion/stable_diffusion_2
|
||||||
title: Stable Diffusion 2
|
title: Stable Diffusion 2
|
||||||
|
- local: api/pipelines/stable_diffusion/stable_diffusion_3
|
||||||
|
title: Stable Diffusion 3
|
||||||
- local: api/pipelines/stable_diffusion/stable_diffusion_xl
|
- local: api/pipelines/stable_diffusion/stable_diffusion_xl
|
||||||
title: Stable Diffusion XL
|
title: Stable Diffusion XL
|
||||||
- local: api/pipelines/stable_diffusion/sdxl_turbo
|
- local: api/pipelines/stable_diffusion/sdxl_turbo
|
||||||
@@ -383,7 +397,8 @@
|
|||||||
- local: api/pipelines/wuerstchen
|
- local: api/pipelines/wuerstchen
|
||||||
title: Wuerstchen
|
title: Wuerstchen
|
||||||
title: Pipelines
|
title: Pipelines
|
||||||
- sections:
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
- local: api/schedulers/overview
|
- local: api/schedulers/overview
|
||||||
title: Overview
|
title: Overview
|
||||||
- local: api/schedulers/cm_stochastic_iterative
|
- local: api/schedulers/cm_stochastic_iterative
|
||||||
@@ -414,6 +429,8 @@
|
|||||||
title: EulerAncestralDiscreteScheduler
|
title: EulerAncestralDiscreteScheduler
|
||||||
- local: api/schedulers/euler
|
- local: api/schedulers/euler
|
||||||
title: EulerDiscreteScheduler
|
title: EulerDiscreteScheduler
|
||||||
|
- local: api/schedulers/flow_match_euler_discrete
|
||||||
|
title: FlowMatchEulerDiscreteScheduler
|
||||||
- local: api/schedulers/heun
|
- local: api/schedulers/heun
|
||||||
title: HeunDiscreteScheduler
|
title: HeunDiscreteScheduler
|
||||||
- local: api/schedulers/ipndm
|
- local: api/schedulers/ipndm
|
||||||
@@ -443,7 +460,8 @@
|
|||||||
- local: api/schedulers/vq_diffusion
|
- local: api/schedulers/vq_diffusion
|
||||||
title: VQDiffusionScheduler
|
title: VQDiffusionScheduler
|
||||||
title: Schedulers
|
title: Schedulers
|
||||||
- sections:
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
- local: api/internal_classes_overview
|
- local: api/internal_classes_overview
|
||||||
title: Overview
|
title: Overview
|
||||||
- local: api/attnprocessor
|
- local: api/attnprocessor
|
||||||
@@ -456,5 +474,7 @@
|
|||||||
title: Utilities
|
title: Utilities
|
||||||
- local: api/image_processor
|
- local: api/image_processor
|
||||||
title: VAE Image Processor
|
title: VAE Image Processor
|
||||||
|
- local: api/video_processor
|
||||||
|
title: Video Processor
|
||||||
title: Internal classes
|
title: Internal classes
|
||||||
title: API
|
title: API
|
||||||
|
|||||||
231
docs/source/en/advanced_inference/outpaint.md
Normal file
231
docs/source/en/advanced_inference/outpaint.md
Normal file
@@ -0,0 +1,231 @@
|
|||||||
|
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License.
|
||||||
|
-->
|
||||||
|
|
||||||
|
# Outpainting
|
||||||
|
|
||||||
|
Outpainting extends an image beyond its original boundaries, allowing you to add, replace, or modify visual elements in an image while preserving the original image. Like [inpainting](../using-diffusers/inpaint), you want to fill the white area (in this case, the area outside of the original image) with new visual elements while keeping the original image (represented by a mask of black pixels). There are a couple of ways to outpaint, such as with a [ControlNet](https://hf.co/blog/OzzyGT/outpainting-controlnet) or with [Differential Diffusion](https://hf.co/blog/OzzyGT/outpainting-differential-diffusion).
|
||||||
|
|
||||||
|
This guide will show you how to outpaint with an inpainting model, ControlNet, and a ZoeDepth estimator.
|
||||||
|
|
||||||
|
Before you begin, make sure you have the [controlnet_aux](https://github.com/huggingface/controlnet_aux) library installed so you can use the ZoeDepth estimator.
|
||||||
|
|
||||||
|
```py
|
||||||
|
!pip install -q controlnet_aux
|
||||||
|
```
|
||||||
|
|
||||||
|
## Image preparation
|
||||||
|
|
||||||
|
Start by picking an image to outpaint with and remove the background with a Space like [BRIA-RMBG-1.4](https://hf.co/spaces/briaai/BRIA-RMBG-1.4).
|
||||||
|
|
||||||
|
<iframe
|
||||||
|
src="https://briaai-bria-rmbg-1-4.hf.space"
|
||||||
|
frameborder="0"
|
||||||
|
width="850"
|
||||||
|
height="450"
|
||||||
|
></iframe>
|
||||||
|
|
||||||
|
For example, remove the background from this image of a pair of shoes.
|
||||||
|
|
||||||
|
<div class="flex flex-row gap-4">
|
||||||
|
<div class="flex-1">
|
||||||
|
<img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/original-jordan.png"/>
|
||||||
|
<figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption>
|
||||||
|
</div>
|
||||||
|
<div class="flex-1">
|
||||||
|
<img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/no-background-jordan.png"/>
|
||||||
|
<figcaption class="mt-2 text-center text-sm text-gray-500">background removed</figcaption>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
[Stable Diffusion XL (SDXL)](../using-diffusers/sdxl) models work best with 1024x1024 images, but you can resize the image to any size as long as your hardware has enough memory to support it. The transparent background in the image should also be replaced with a white background. Create a function (like the one below) that scales and pastes the image onto a white background.
|
||||||
|
|
||||||
|
```py
|
||||||
|
import random
|
||||||
|
|
||||||
|
import requests
|
||||||
|
import torch
|
||||||
|
from controlnet_aux import ZoeDetector
|
||||||
|
from PIL import Image, ImageOps
|
||||||
|
|
||||||
|
from diffusers import (
|
||||||
|
AutoencoderKL,
|
||||||
|
ControlNetModel,
|
||||||
|
StableDiffusionXLControlNetPipeline,
|
||||||
|
StableDiffusionXLInpaintPipeline,
|
||||||
|
)
|
||||||
|
|
||||||
|
def scale_and_paste(original_image):
|
||||||
|
aspect_ratio = original_image.width / original_image.height
|
||||||
|
|
||||||
|
if original_image.width > original_image.height:
|
||||||
|
new_width = 1024
|
||||||
|
new_height = round(new_width / aspect_ratio)
|
||||||
|
else:
|
||||||
|
new_height = 1024
|
||||||
|
new_width = round(new_height * aspect_ratio)
|
||||||
|
|
||||||
|
resized_original = original_image.resize((new_width, new_height), Image.LANCZOS)
|
||||||
|
white_background = Image.new("RGBA", (1024, 1024), "white")
|
||||||
|
x = (1024 - new_width) // 2
|
||||||
|
y = (1024 - new_height) // 2
|
||||||
|
white_background.paste(resized_original, (x, y), resized_original)
|
||||||
|
|
||||||
|
return resized_original, white_background
|
||||||
|
|
||||||
|
original_image = Image.open(
|
||||||
|
requests.get(
|
||||||
|
"https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/no-background-jordan.png",
|
||||||
|
stream=True,
|
||||||
|
).raw
|
||||||
|
).convert("RGBA")
|
||||||
|
resized_img, white_bg_image = scale_and_paste(original_image)
|
||||||
|
```
|
||||||
|
|
||||||
|
To avoid adding unwanted extra details, use the ZoeDepth estimator to provide additional guidance during generation and to ensure the shoes remain consistent with the original image.
|
||||||
|
|
||||||
|
```py
|
||||||
|
zoe = ZoeDetector.from_pretrained("lllyasviel/Annotators")
|
||||||
|
image_zoe = zoe(white_bg_image, detect_resolution=512, image_resolution=1024)
|
||||||
|
image_zoe
|
||||||
|
```
|
||||||
|
|
||||||
|
<div class="flex justify-center">
|
||||||
|
<img src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/zoedepth-jordan.png"/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
## Outpaint
|
||||||
|
|
||||||
|
Once your image is ready, you can generate content in the white area around the shoes with [controlnet-inpaint-dreamer-sdxl](https://hf.co/destitech/controlnet-inpaint-dreamer-sdxl), an SDXL ControlNet trained for inpainting.

Load the inpainting ControlNet, the ZoeDepth ControlNet, and the VAE, and pass them to the [`StableDiffusionXLControlNetPipeline`]. Then you can create an optional `generate_image` function (for convenience) to outpaint an initial image.

```py
controlnets = [
    ControlNetModel.from_pretrained(
        "destitech/controlnet-inpaint-dreamer-sdxl", torch_dtype=torch.float16, variant="fp16"
    ),
    ControlNetModel.from_pretrained(
        "diffusers/controlnet-zoe-depth-sdxl-1.0", torch_dtype=torch.float16
    ),
]
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16).to("cuda")
pipeline = StableDiffusionXLControlNetPipeline.from_pretrained(
    "SG161222/RealVisXL_V4.0", torch_dtype=torch.float16, variant="fp16", controlnet=controlnets, vae=vae
).to("cuda")


def generate_image(prompt, negative_prompt, inpaint_image, zoe_image, seed: int = None):
    if seed is None:
        seed = random.randint(0, 2**32 - 1)

    generator = torch.Generator(device="cpu").manual_seed(seed)

    image = pipeline(
        prompt,
        negative_prompt=negative_prompt,
        image=[inpaint_image, zoe_image],
        guidance_scale=6.5,
        num_inference_steps=25,
        generator=generator,
        controlnet_conditioning_scale=[0.5, 0.8],
        control_guidance_end=[0.9, 0.6],
    ).images[0]

    return image


prompt = "nike air jordans on a basketball court"
negative_prompt = ""

temp_image = generate_image(prompt, negative_prompt, white_bg_image, image_zoe, 908097)
```

Paste the original image over the initial outpainted image. You'll improve the outpainted background in a later step.

```py
x = (1024 - resized_img.width) // 2
y = (1024 - resized_img.height) // 2
temp_image.paste(resized_img, (x, y), resized_img)
temp_image
```

<div class="flex justify-center">
  <img src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/initial-outpaint.png"/>
</div>

> [!TIP]
> Now is a good time to free up some memory if you're running low!
>
> ```py
> pipeline = None
> torch.cuda.empty_cache()
> ```

Now that you have an initial outpainted image, load the [`StableDiffusionXLInpaintPipeline`] with the [RealVisXL](https://hf.co/SG161222/RealVisXL_V4.0) model to generate the final outpainted image with better quality.

```py
pipeline = StableDiffusionXLInpaintPipeline.from_pretrained(
    "OzzyGT/RealVisXL_V4.0_inpainting",
    torch_dtype=torch.float16,
    variant="fp16",
    vae=vae,
).to("cuda")
```

Prepare a mask for the final outpainted image. To create a more natural transition between the original image and the outpainted background, blur the mask to help it blend better.

```py
mask = Image.new("L", temp_image.size)
mask.paste(resized_img.split()[3], (x, y))
mask = ImageOps.invert(mask)
final_mask = mask.point(lambda p: p > 128 and 255)
mask_blurred = pipeline.mask_processor.blur(final_mask, blur_factor=20)
mask_blurred
```

<div class="flex justify-center">
  <img src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/blurred-mask.png"/>
</div>

Create a better prompt and pass it to the `generate_outpaint` function to generate the final outpainted image. Again, paste the original image over the final outpainted background.

```py
def generate_outpaint(prompt, negative_prompt, image, mask, seed: int = None):
    if seed is None:
        seed = random.randint(0, 2**32 - 1)

    generator = torch.Generator(device="cpu").manual_seed(seed)

    image = pipeline(
        prompt,
        negative_prompt=negative_prompt,
        image=image,
        mask_image=mask,
        guidance_scale=10.0,
        strength=0.8,
        num_inference_steps=30,
        generator=generator,
    ).images[0]

    return image


prompt = "high quality photo of nike air jordans on a basketball court, highly detailed"
negative_prompt = ""

final_image = generate_outpaint(prompt, negative_prompt, temp_image, mask_blurred, 7688778)
x = (1024 - resized_img.width) // 2
y = (1024 - resized_img.height) // 2
final_image.paste(resized_img, (x, y), resized_img)
final_image
```

<div class="flex justify-center">
  <img src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/final-outpaint.png"/>
</div>

@@ -41,12 +41,6 @@ An attention processor is a class for applying different types of attention mech

## FusedAttnProcessor2_0
[[autodoc]] models.attention_processor.FusedAttnProcessor2_0

-## LoRAAttnAddedKVProcessor
-[[autodoc]] models.attention_processor.LoRAAttnAddedKVProcessor
-
-## LoRAXFormersAttnProcessor
-[[autodoc]] models.attention_processor.LoRAXFormersAttnProcessor
-
## SlicedAttnProcessor
[[autodoc]] models.attention_processor.SlicedAttnProcessor

@@ -55,3 +49,6 @@ An attention processor is a class for applying different types of attention mech

## XFormersAttnProcessor
[[autodoc]] models.attention_processor.XFormersAttnProcessor

## AttnProcessorNPU
[[autodoc]] models.attention_processor.AttnProcessorNPU

@@ -25,3 +25,11 @@ All pipelines with [`VaeImageProcessor`] accept PIL Image, PyTorch tensor, or Nu

The [`VaeImageProcessorLDM3D`] accepts RGB and depth inputs and returns RGB and depth outputs.

[[autodoc]] image_processor.VaeImageProcessorLDM3D

## PixArtImageProcessor

[[autodoc]] image_processor.PixArtImageProcessor

## IPAdapterMaskProcessor

[[autodoc]] image_processor.IPAdapterMaskProcessor

@@ -12,26 +12,50 @@ specific language governing permissions and limitations under the License.

# Single files

-Diffusers supports loading pretrained pipeline (or model) weights stored in a single file, such as a `ckpt` or `safetensors` file. These single file types are typically produced from community trained models. There are three classes for loading single file weights:
-- [`FromSingleFileMixin`] supports loading pretrained pipeline weights stored in a single file, which can either be a `ckpt` or `safetensors` file.
-- [`FromOriginalVAEMixin`] supports loading a pretrained [`AutoencoderKL`] from pretrained ControlNet weights stored in a single file, which can either be a `ckpt` or `safetensors` file.
-- [`FromOriginalControlnetMixin`] supports loading pretrained ControlNet weights stored in a single file, which can either be a `ckpt` or `safetensors` file.
-<Tip>

The [`~loaders.FromSingleFileMixin.from_single_file`] method allows you to load:

* a model stored in a single file, which is useful if you're working with models from the diffusion ecosystem, like Automatic1111, which commonly rely on a single-file layout to store and share models
* a model stored in its originally distributed layout, which is useful if you're working with models finetuned with other services and you want to load it directly into Diffusers model objects and pipelines

> [!TIP]
> Read the [Model files and layouts](../../using-diffusers/other-formats) guide to learn more about the Diffusers-multifolder layout versus the single-file layout, and how to load models stored in these different layouts.


-To learn more about how to load single file weights, see the [Load different Stable Diffusion formats](../../using-diffusers/other-formats) loading guide.
-</Tip>

## Supported pipelines

- [`StableDiffusionPipeline`]
- [`StableDiffusionImg2ImgPipeline`]
- [`StableDiffusionInpaintPipeline`]
- [`StableDiffusionControlNetPipeline`]
- [`StableDiffusionControlNetImg2ImgPipeline`]
- [`StableDiffusionControlNetInpaintPipeline`]
- [`StableDiffusionUpscalePipeline`]
- [`StableDiffusionXLPipeline`]
- [`StableDiffusionXLImg2ImgPipeline`]
- [`StableDiffusionXLInpaintPipeline`]
- [`StableDiffusionXLInstructPix2PixPipeline`]
- [`StableDiffusionXLControlNetPipeline`]
- [`StableDiffusionXLKDiffusionPipeline`]
- [`StableDiffusion3Pipeline`]
- [`LatentConsistencyModelPipeline`]
- [`LatentConsistencyModelImg2ImgPipeline`]
- [`StableDiffusionControlNetXSPipeline`]
- [`StableDiffusionXLControlNetXSPipeline`]
- [`LEditsPPPipelineStableDiffusion`]
- [`LEditsPPPipelineStableDiffusionXL`]
- [`PIAPipeline`]

## Supported models

- [`UNet2DConditionModel`]
- [`StableCascadeUNet`]
- [`AutoencoderKL`]
- [`ControlNetModel`]
- [`SD3Transformer2DModel`]

## FromSingleFileMixin

[[autodoc]] loaders.single_file.FromSingleFileMixin

-## FromOriginalVAEMixin
## FromOriginalModelMixin

-[[autodoc]] loaders.autoencoder.FromOriginalVAEMixin
[[autodoc]] loaders.single_file_model.FromOriginalModelMixin

-## FromOriginalControlnetMixin
-
-[[autodoc]] loaders.controlnet.FromOriginalControlNetMixin

@@ -10,7 +10,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o

specific language governing permissions and limitations under the License.
-->

-# ControlNet
# ControlNetModel

The ControlNet model was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, Maneesh Agrawala. It provides a greater degree of control over text-to-image generation by conditioning the model on additional inputs such as edge maps, depth maps, segmentation maps, and keypoints for pose detection.

docs/source/en/api/models/controlnet_sd3.md (new file, 42 lines)
@@ -0,0 +1,42 @@

<!--Copyright 2024 The HuggingFace Team and The InstantX Team. All rights reserved. Licensed under the Apache License, Version 2.0; distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND. See http://www.apache.org/licenses/LICENSE-2.0 for the specific language governing permissions and limitations under the License.-->

# SD3ControlNetModel

SD3ControlNetModel is an implementation of ControlNet for Stable Diffusion 3.

The ControlNet model was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, Maneesh Agrawala. It provides a greater degree of control over text-to-image generation by conditioning the model on additional inputs such as edge maps, depth maps, segmentation maps, and keypoints for pose detection.

The abstract from the paper is:

*We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with "zero convolutions" (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, eg, edges, depth, segmentation, human pose, etc, with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.*

## Loading from the original format

By default the [`SD3ControlNetModel`] should be loaded with [`~ModelMixin.from_pretrained`].

```py
from diffusers import StableDiffusion3ControlNetPipeline
from diffusers.models import SD3ControlNetModel, SD3MultiControlNetModel

controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny")
pipe = StableDiffusion3ControlNetPipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet)
```

## SD3ControlNetModel

[[autodoc]] SD3ControlNetModel

## SD3ControlNetOutput

[[autodoc]] models.controlnet_sd3.SD3ControlNetOutput

docs/source/en/api/models/dit_transformer2d.md (new file, 19 lines)
@@ -0,0 +1,19 @@

<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0; distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND. See http://www.apache.org/licenses/LICENSE-2.0 for the specific language governing permissions and limitations under the License.-->

# DiTTransformer2DModel

A Transformer model for image-like data from [DiT](https://huggingface.co/papers/2212.09748).

## DiTTransformer2DModel

[[autodoc]] DiTTransformer2DModel

docs/source/en/api/models/hunyuan_transformer2d.md (new file, 20 lines)
@@ -0,0 +1,20 @@

<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0; distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND. See http://www.apache.org/licenses/LICENSE-2.0 for the specific language governing permissions and limitations under the License.-->

# HunyuanDiT2DModel

A Diffusion Transformer model for 2D data from [Hunyuan-DiT](https://github.com/Tencent/HunyuanDiT).

## HunyuanDiT2DModel

[[autodoc]] HunyuanDiT2DModel

docs/source/en/api/models/pixart_transformer2d.md (new file, 19 lines)
@@ -0,0 +1,19 @@

<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0; distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND. See http://www.apache.org/licenses/LICENSE-2.0 for the specific language governing permissions and limitations under the License.-->

# PixArtTransformer2DModel

A Transformer model for image-like data from [PixArt-Alpha](https://huggingface.co/papers/2310.00426) and [PixArt-Sigma](https://huggingface.co/papers/2403.04692).

## PixArtTransformer2DModel

[[autodoc]] PixArtTransformer2DModel

@@ -10,7 +10,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o

specific language governing permissions and limitations under the License.
-->

-# Prior Transformer
# PriorTransformer

The Prior Transformer was originally introduced in [Hierarchical Text-Conditional Image Generation with CLIP Latents](https://huggingface.co/papers/2204.06125) by Ramesh et al. It is used to predict CLIP image embeddings from CLIP text embeddings; image embeddings are predicted through a denoising diffusion process.

docs/source/en/api/models/sd3_transformer2d.md (new file, 19 lines)
@@ -0,0 +1,19 @@

<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0; distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND. See http://www.apache.org/licenses/LICENSE-2.0 for the specific language governing permissions and limitations under the License.-->

# SD3 Transformer Model

The Transformer model introduced in [Stable Diffusion 3](https://hf.co/papers/2403.03206). Its novelty lies in the MMDiT transformer block.

## SD3Transformer2DModel

[[autodoc]] SD3Transformer2DModel

@@ -10,7 +10,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o

specific language governing permissions and limitations under the License.
-->

-# Transformer2D
# Transformer2DModel

A Transformer model for image-like data from [CompVis](https://huggingface.co/CompVis) that is based on the [Vision Transformer](https://huggingface.co/papers/2010.11929) introduced by Dosovitskiy et al. The [`Transformer2DModel`] accepts discrete (classes of vector embeddings) or continuous (actual embeddings) inputs.

@@ -10,7 +10,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o

specific language governing permissions and limitations under the License.
-->

-# Transformer Temporal
# TransformerTemporalModel

A Transformer model for video-like data.

@@ -24,4 +24,4 @@ The abstract from the paper is:

## VQEncoderOutput

-[[autodoc]] models.vq_model.VQEncoderOutput
[[autodoc]] models.autoencoders.vq_model.VQEncoderOutput

@@ -78,7 +78,6 @@ output = pipe(

)
frames = output.frames[0]
export_to_gif(frames, "animation.gif")
-
```

Here are some sample outputs:

@@ -101,6 +100,53 @@ AnimateDiff tends to work better with finetuned Stable Diffusion models. If you

</Tip>

### AnimateDiffSDXLPipeline

AnimateDiff can also be used with SDXL models. This is currently an experimental feature as only a beta release of the motion adapter checkpoint is available.

```python
import torch
from diffusers.models import MotionAdapter
from diffusers import AnimateDiffSDXLPipeline, DDIMScheduler
from diffusers.utils import export_to_gif

adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-sdxl-beta", torch_dtype=torch.float16)

model_id = "stabilityai/stable-diffusion-xl-base-1.0"
scheduler = DDIMScheduler.from_pretrained(
    model_id,
    subfolder="scheduler",
    clip_sample=False,
    timestep_spacing="linspace",
    beta_schedule="linear",
    steps_offset=1,
)
pipe = AnimateDiffSDXLPipeline.from_pretrained(
    model_id,
    motion_adapter=adapter,
    scheduler=scheduler,
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# enable memory savings
pipe.enable_vae_slicing()
pipe.enable_vae_tiling()

output = pipe(
    prompt="a panda surfing in the ocean, realistic, high quality",
    negative_prompt="low quality, worst quality",
    num_inference_steps=20,
    guidance_scale=8,
    width=1024,
    height=1024,
    num_frames=16,
)

frames = output.frames[0]
export_to_gif(frames, "animation.gif")
```

### AnimateDiffVideoToVideoPipeline

AnimateDiff can also be used to generate visually similar videos or enable style/character/background or other edits starting from an initial video, allowing you to seamlessly explore creative possibilities.

@@ -118,7 +164,7 @@ from PIL import Image

adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
# load SD 1.5 based finetuned model
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffVideoToVideoPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16).to("cuda")
pipe = AnimateDiffVideoToVideoPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
scheduler = DDIMScheduler.from_pretrained(
    model_id,
    subfolder="scheduler",

@@ -256,7 +302,6 @@ output = pipe(

)
frames = output.frames[0]
export_to_gif(frames, "animation.gif")
-
```

<table>

@@ -331,7 +376,6 @@ output = pipe(

)
frames = output.frames[0]
export_to_gif(frames, "animation.gif")
-
```

<table>

@@ -522,6 +566,12 @@ export_to_gif(frames, "animatelcm-motion-lora.gif")

- all
- __call__

## AnimateDiffSDXLPipeline

[[autodoc]] AnimateDiffSDXLPipeline
- all
- __call__

## AnimateDiffVideoToVideoPipeline

[[autodoc]] AnimateDiffVideoToVideoPipeline

@@ -12,42 +12,10 @@ specific language governing permissions and limitations under the License.

# AutoPipeline

-`AutoPipeline` is designed to:
-1. make it easy for you to load a checkpoint for a task without knowing the specific pipeline class to use
-2. use multiple pipelines in your workflow
-Based on the task, the `AutoPipeline` class automatically retrieves the relevant pipeline given the name or path to the pretrained weights with the `from_pretrained()` method.
-To seamlessly switch between tasks with the same checkpoint without reallocating additional memory, use the `from_pipe()` method to transfer the components from the original pipeline to the new one.
-```py
-from diffusers import AutoPipelineForText2Image
-import torch
-pipeline = AutoPipelineForText2Image.from_pretrained(
-    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
-).to("cuda")
-prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
-image = pipeline(prompt, num_inference_steps=25).images[0]
-```
-<Tip>
-Check out the [AutoPipeline](../../tutorials/autopipeline) tutorial to learn how to use this API!
-</Tip>
-`AutoPipeline` supports text-to-image, image-to-image, and inpainting for the following diffusion models:
-- [Stable Diffusion](./stable_diffusion/overview)
-- [ControlNet](./controlnet)
-- [Stable Diffusion XL (SDXL)](./stable_diffusion/stable_diffusion_xl)
-- [DeepFloyd IF](./deepfloyd_if)
-- [Kandinsky 2.1](./kandinsky)
-- [Kandinsky 2.2](./kandinsky_v22)

The `AutoPipeline` is designed to make it easy to load a checkpoint for a task without needing to know the specific pipeline class. Based on the task, the `AutoPipeline` automatically retrieves the correct pipeline class from the checkpoint `model_index.json` file.

> [!TIP]
> Check out the [AutoPipeline](../../tutorials/autopipeline) tutorial to learn how to use this API!

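For illustration, a minimal text-to-image sketch with `AutoPipelineForText2Image` (the checkpoint name is an example):

```py
import torch
from diffusers import AutoPipelineForText2Image

# the task-specific class (here, a Stable Diffusion text-to-image pipeline)
# is resolved automatically from the checkpoint's model_index.json
pipeline = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

image = pipeline("Astronaut in a jungle, cold color palette, detailed, 8k", num_inference_steps=25).images[0]
```
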
## AutoPipelineForText2Image

docs/source/en/api/pipelines/controlnet_sd3.md (new file, 39 lines)
@@ -0,0 +1,39 @@

<!--Copyright 2023 The HuggingFace Team and The InstantX Team. All rights reserved. Licensed under the Apache License, Version 2.0; distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND. See http://www.apache.org/licenses/LICENSE-2.0 for the specific language governing permissions and limitations under the License.-->

# ControlNet with Stable Diffusion 3

StableDiffusion3ControlNetPipeline is an implementation of ControlNet for Stable Diffusion 3.

ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.

With a ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process.

The abstract from the paper is:

*We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with "zero convolutions" (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, eg, edges, depth, segmentation, human pose, etc, with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.*

This code is implemented by [The InstantX Team](https://huggingface.co/InstantX). You can find pre-trained checkpoints for SD3-ControlNet on [The InstantX Team](https://huggingface.co/InstantX) Hub profile.

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

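For illustration, an end-to-end sketch of the pipeline described above; the control image URL is an example and should be swapped for your own conditioning image:

```py
import torch
from diffusers import StableDiffusion3ControlNetPipeline, SD3ControlNetModel
from diffusers.utils import load_image

controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)
pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# a Canny edge map used as the conditioning image (example URL)
control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg")

image = pipe(
    "a photo of an astronaut riding a horse",
    control_image=control_image,
    controlnet_conditioning_scale=0.7,
    num_inference_steps=28,
).images[0]
```
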
## StableDiffusion3ControlNetPipeline

[[autodoc]] StableDiffusion3ControlNetPipeline
- all
- __call__

## StableDiffusion3PipelineOutput

[[autodoc]] pipelines.stable_diffusion_3.pipeline_output.StableDiffusion3PipelineOutput

docs/source/en/api/pipelines/hunyuandit.md (new file, 95 lines)
@@ -0,0 +1,95 @@

<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0; distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND. See http://www.apache.org/licenses/LICENSE-2.0 for the specific language governing permissions and limitations under the License.-->

# Hunyuan-DiT

![chinese elements understanding](https://github.com/gnobitab/diffusers-hunyuan/assets/1157982/39b99036-c3cb-4f16-bb1a-40ec25eda573)

[Hunyuan-DiT : A Powerful Multi-Resolution Diffusion Transformer with Fine-Grained Chinese Understanding](https://arxiv.org/abs/2405.08748) from Tencent Hunyuan.

The abstract from the paper is:

*We present Hunyuan-DiT, a text-to-image diffusion transformer with fine-grained understanding of both English and Chinese. To construct Hunyuan-DiT, we carefully design the transformer structure, text encoder, and positional encoding. We also build from scratch a whole data pipeline to update and evaluate data for iterative model optimization. For fine-grained language understanding, we train a Multimodal Large Language Model to refine the captions of the images. Finally, Hunyuan-DiT can perform multi-turn multimodal dialogue with users, generating and refining images according to the context. Through our holistic human evaluation protocol with more than 50 professional human evaluators, Hunyuan-DiT sets a new state-of-the-art in Chinese-to-image generation compared with other open-source models.*

You can find the original codebase at [Tencent/HunyuanDiT](https://github.com/Tencent/HunyuanDiT) and all the available checkpoints at [Tencent-Hunyuan](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT).

**Highlights**: HunyuanDiT supports Chinese/English-to-image and multi-resolution generation.

HunyuanDiT has the following components:
* It uses a diffusion transformer as the backbone
* It combines two text encoders, a bilingual CLIP and a multilingual T5 encoder

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers.md) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading.md#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## Optimization

You can optimize the pipeline's runtime and memory consumption with torch.compile and feed-forward chunking. To learn about other optimization methods, check out the [Speed up inference](../../optimization/fp16) and [Reduce memory usage](../../optimization/memory) guides.

### Inference

Use [`torch.compile`](https://huggingface.co/docs/diffusers/main/en/tutorials/fast_diffusion#torchcompile) to reduce the inference latency.

First, load the pipeline:

```python
from diffusers import HunyuanDiTPipeline
import torch

pipeline = HunyuanDiTPipeline.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-Diffusers", torch_dtype=torch.float16
).to("cuda")
```

Then change the memory layout of the pipeline's `transformer` and `vae` components to `torch.channels_last`:

```python
pipeline.transformer.to(memory_format=torch.channels_last)
pipeline.vae.to(memory_format=torch.channels_last)
```

Finally, compile the components and run inference:

```python
pipeline.transformer = torch.compile(pipeline.transformer, mode="max-autotune", fullgraph=True)
pipeline.vae.decode = torch.compile(pipeline.vae.decode, mode="max-autotune", fullgraph=True)

image = pipeline(prompt="一个宇航员在骑马").images[0]
```

The [benchmark](https://gist.github.com/sayakpaul/29d3a14905cfcbf611fe71ebd22e9b23) results on an 80GB A100 machine are:

```bash
With torch.compile(): Average inference time: 12.470 seconds.
Without torch.compile(): Average inference time: 20.570 seconds.
```

### Memory optimization

By loading the T5 text encoder in 8 bits, you can run the pipeline in just under 6 GBs of GPU VRAM. Refer to [this script](https://gist.github.com/sayakpaul/3154605f6af05b98a41081aaba5ca43e) for details.

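As a rough sketch of the idea (the referenced script is the authoritative version): load the T5 encoder in 8-bit with bitsandbytes and pass it to the pipeline. The `text_encoder_2` component and subfolder name used here are assumptions and should be checked against the checkpoint layout:

```py
import torch
from transformers import T5EncoderModel
from diffusers import HunyuanDiTPipeline

# assumption: the multilingual T5 encoder is stored under the "text_encoder_2" subfolder
text_encoder_2 = T5EncoderModel.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-Diffusers",
    subfolder="text_encoder_2",
    load_in_8bit=True,
    device_map="auto",
)
pipeline = HunyuanDiTPipeline.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-Diffusers",
    text_encoder_2=text_encoder_2,
    torch_dtype=torch.float16,
)
```
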

Furthermore, you can use the [`~HunyuanDiT2DModel.enable_forward_chunking`] method to reduce memory usage. Feed-forward chunking runs the feed-forward layers in a transformer block in a loop instead of all at once. This gives you a trade-off between memory consumption and inference runtime.

```diff
+ pipeline.transformer.enable_forward_chunking(chunk_size=1, dim=1)
```

## HunyuanDiTPipeline

[[autodoc]] HunyuanDiTPipeline
- all
- __call__

@@ -47,6 +47,7 @@ Sample output with I2VGenXL:

* Unlike SVD, it additionally accepts text prompts as inputs.
* It can generate higher resolution videos.
* When using the [`DDIMScheduler`] (which is the default for this pipeline), less than 50 steps for inference leads to bad results.
* This implementation is a 1-stage variant of I2VGenXL. The main figure in the [I2VGen-XL](https://arxiv.org/abs/2311.04145) paper shows a 2-stage variant; however, the 1-stage variant works well. See [this discussion](https://github.com/huggingface/diffusers/discussions/7952) for more details.

## I2VGenXLPipeline

[[autodoc]] I2VGenXLPipeline

@@ -11,7 +11,7 @@ specific language governing permissions and limitations under the License.

Kandinsky 3 is created by [Vladimir Arkhipkin](https://github.com/oriBetelgeuse), [Anastasia Maltseva](https://github.com/NastyaMittseva), [Igor Pavlov](https://github.com/boomb0om), [Andrei Filatov](https://github.com/anvilarth), [Arseniy Shakhmatov](https://github.com/cene555), [Andrey Kuznetsov](https://github.com/kuznetsoffandrey), [Denis Dimitrov](https://github.com/denndimitrov), and [Zein Shaheen](https://github.com/zeinsh).

-The description from it's Github page:
The description from its GitHub page:

*Kandinsky 3.0 is an open-source text-to-image diffusion model built upon the Kandinsky2-x model family. In comparison to its predecessors, enhancements have been made to the text understanding and visual quality of the model, achieved by increasing the size of the text encoder and Diffusion U-Net models, respectively.*

docs/source/en/api/pipelines/marigold.md (new file, 76 lines)
@@ -0,0 +1,76 @@

<!--Copyright 2024 Marigold authors and The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0; distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND. See http://www.apache.org/licenses/LICENSE-2.0 for the specific language governing permissions and limitations under the License.-->

# Marigold Pipelines for Computer Vision Tasks

![marigold](https://marigoldmonodepth.github.io/images/teaser_collage_compressed.jpg)

Marigold was proposed in [Repurposing Diffusion-Based Image Generators for Monocular Depth Estimation](https://huggingface.co/papers/2312.02145), a CVPR 2024 Oral paper by [Bingxin Ke](http://www.kebingxin.com/), [Anton Obukhov](https://www.obukhov.ai/), [Shengyu Huang](https://shengyuh.github.io/), [Nando Metzger](https://nandometzger.github.io/), [Rodrigo Caye Daudt](https://rcdaudt.github.io/), and [Konrad Schindler](https://scholar.google.com/citations?user=FZuNgqIAAAAJ&hl=en).
The idea is to repurpose the rich generative prior of Text-to-Image Latent Diffusion Models (LDMs) for traditional computer vision tasks.
Initially, this idea was explored to fine-tune Stable Diffusion for Monocular Depth Estimation, as shown in the teaser above.
Later,
- [Tianfu Wang](https://tianfwang.github.io/) trained the first Latent Consistency Model (LCM) of Marigold, which unlocked fast single-step inference;
- [Kevin Qu](https://www.linkedin.com/in/kevin-qu-b3417621b/?locale=en_US) extended the approach to Surface Normals Estimation;
- [Anton Obukhov](https://www.obukhov.ai/) contributed the pipelines and documentation into diffusers (enabled and supported by [YiYi Xu](https://yiyixuxu.github.io/) and [Sayak Paul](https://sayak.dev/)).

The abstract from the paper is:

*Monocular depth estimation is a fundamental computer vision task. Recovering 3D depth from a single image is geometrically ill-posed and requires scene understanding, so it is not surprising that the rise of deep learning has led to a breakthrough. The impressive progress of monocular depth estimators has mirrored the growth in model capacity, from relatively modest CNNs to large Transformer architectures. Still, monocular depth estimators tend to struggle when presented with images with unfamiliar content and layout, since their knowledge of the visual world is restricted by the data seen during training, and challenged by zero-shot generalization to new domains. This motivates us to explore whether the extensive priors captured in recent generative diffusion models can enable better, more generalizable depth estimation. We introduce Marigold, a method for affine-invariant monocular depth estimation that is derived from Stable Diffusion and retains its rich prior knowledge. The estimator can be fine-tuned in a couple of days on a single GPU using only synthetic training data. It delivers state-of-the-art performance across a wide range of datasets, including over 20% performance gains in specific cases. Project page: https://marigoldmonodepth.github.io.*

## Available Pipelines

Each pipeline supports one Computer Vision task, which takes an RGB image as input and produces a *prediction* of the modality of interest, such as a depth map of the input image.
Currently, the following tasks are implemented:

| Pipeline | Predicted Modalities | Demos |
|---|---|:---:|
| [MarigoldDepthPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py) | [Depth](https://en.wikipedia.org/wiki/Depth_map), [Disparity](https://en.wikipedia.org/wiki/Binocular_disparity) | [Fast Demo (LCM)](https://huggingface.co/spaces/prs-eth/marigold-lcm), [Slow Original Demo (DDIM)](https://huggingface.co/spaces/prs-eth/marigold) |
| [MarigoldNormalsPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py) | [Surface normals](https://en.wikipedia.org/wiki/Normal_mapping) | [Fast Demo (LCM)](https://huggingface.co/spaces/prs-eth/marigold-normals-lcm) |

## Available Checkpoints

The original checkpoints can be found under the [PRS-ETH](https://huggingface.co/prs-eth/) Hugging Face organization.

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. Also, to learn more about reducing the memory usage of this pipeline, refer to the ["Reduce memory usage"] section [here](../../using-diffusers/svd#reduce-memory-usage).

</Tip>

<Tip warning={true}>

Marigold pipelines were designed and tested only with `DDIMScheduler` and `LCMScheduler`.
Depending on the scheduler, the number of inference steps required to get reliable predictions varies, and there is no universal value that works best across schedulers.
Because of that, the default value of `num_inference_steps` in the `__call__` method of the pipeline is set to `None` (see the API reference).
Unless set explicitly, its value will be taken from the checkpoint configuration `model_index.json`.
This is done to ensure high-quality predictions when calling the pipeline with just the `image` argument.

</Tip>

See also Marigold [usage examples](marigold_usage).

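For orientation, a depth prediction call looks roughly like the following sketch; the checkpoint name, the `prediction` output field, and the `visualize_depth` helper are assumptions that should be checked against the API reference below:

```py
import torch
from diffusers import MarigoldDepthPipeline
from diffusers.utils import load_image

# LCM checkpoint for fast, few-step inference (assumed repo id)
pipe = MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")

image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
)
depth = pipe(image)

# `prediction` holds the affine-invariant depth map; render it as a color image
vis = pipe.image_processor.visualize_depth(depth.prediction)
vis[0].save("depth_colored.png")
```
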

## MarigoldDepthPipeline

[[autodoc]] MarigoldDepthPipeline
- all
- __call__

## MarigoldNormalsPipeline

[[autodoc]] MarigoldNormalsPipeline
- all
- __call__

## MarigoldDepthOutput

[[autodoc]] pipelines.marigold.pipeline_marigold_depth.MarigoldDepthOutput

## MarigoldNormalsOutput

[[autodoc]] pipelines.marigold.pipeline_marigold_normals.MarigoldNormalsOutput

@@ -97,6 +97,11 @@ The table below lists all the pipelines currently available in 🤗 Diffusers an

- to
- components

[[autodoc]] pipelines.StableDiffusionMixin.enable_freeu

[[autodoc]] pipelines.StableDiffusionMixin.disable_freeu

## FlaxDiffusionPipeline

[[autodoc]] pipelines.pipeline_flax_utils.FlaxDiffusionPipeline

docs/source/en/api/pipelines/pag.md (new file, 41 lines)
@@ -0,0 +1,41 @@

<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0; distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND. See http://www.apache.org/licenses/LICENSE-2.0 for the specific language governing permissions and limitations under the License.-->

# Perturbed-Attention Guidance

[Perturbed-Attention Guidance (PAG)](https://ku-cvlab.github.io/Perturbed-Attention-Guidance/) is a new diffusion sampling guidance that improves sample quality across both unconditional and conditional settings, achieving this without requiring further training or the integration of external modules.

PAG was introduced in [Self-Rectifying Diffusion Sampling with Perturbed-Attention Guidance](https://huggingface.co/papers/2403.17377) by Donghoon Ahn, Hyoungwon Cho, Jaewon Min, Wooseok Jang, Jungwoo Kim, SeonHwa Kim, Hyun Hee Park, Kyong Hwan Jin and Seungryong Kim.

The abstract from the paper is:

*Recent studies have demonstrated that diffusion models are capable of generating high-quality samples, but their quality heavily depends on sampling guidance techniques, such as classifier guidance (CG) and classifier-free guidance (CFG). These techniques are often not applicable in unconditional generation or in various downstream tasks such as image restoration. In this paper, we propose a novel sampling guidance, called Perturbed-Attention Guidance (PAG), which improves diffusion sample quality across both unconditional and conditional settings, achieving this without requiring additional training or the integration of external modules. PAG is designed to progressively enhance the structure of samples throughout the denoising process. It involves generating intermediate samples with degraded structure by substituting selected self-attention maps in diffusion U-Net with an identity matrix, by considering the self-attention mechanisms' ability to capture structural information, and guiding the denoising process away from these degraded samples. In both ADM and Stable Diffusion, PAG surprisingly improves sample quality in conditional and even unconditional scenarios. Moreover, PAG significantly improves the baseline performance in various downstream tasks where existing guidances such as CG or CFG cannot be fully utilized, including ControlNet with empty prompts and image restoration such as inpainting and deblurring.*

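For illustration, a minimal hypothetical sketch of calling the SDXL PAG pipeline; the PAG-specific call argument (`pag_scale`) is an assumed parameter name and should be verified against the API reference below:

```py
import torch
from diffusers import StableDiffusionXLPAGPipeline

pipe = StableDiffusionXLPAGPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

image = pipe(
    "an insect robot preparing a delicious meal",
    guidance_scale=5.0,
    pag_scale=3.0,  # strength of the perturbed-attention guidance term (assumed parameter name)
).images[0]
```
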

## StableDiffusionXLPAGPipeline

[[autodoc]] StableDiffusionXLPAGPipeline
- all
- __call__

## StableDiffusionXLPAGImg2ImgPipeline

[[autodoc]] StableDiffusionXLPAGImg2ImgPipeline
- all
- __call__

## StableDiffusionXLPAGInpaintPipeline

[[autodoc]] StableDiffusionXLPAGInpaintPipeline
- all
- __call__

## StableDiffusionXLControlNetPAGPipeline

[[autodoc]] StableDiffusionXLControlNetPAGPipeline
- all
- __call__

@@ -31,7 +31,7 @@ Some notes about this pipeline:

<Tip>

-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers.md) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading.md#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

@@ -75,7 +75,7 @@ with torch.no_grad():

    prompt_embeds, prompt_attention_mask, negative_embeds, negative_prompt_attention_mask = pipe.encode_prompt(prompt)
```

-Since text embeddings have been computed, remove the `text_encoder` and `pipe` from the memory, and free up som GPU VRAM:
Since text embeddings have been computed, remove the `text_encoder` and `pipe` from the memory, and free up some GPU VRAM:

```python
import gc

@@ -146,4 +146,3 @@ While loading the `text_encoder`, you set `load_in_8bit` to `True`. You could al

[[autodoc]] PixArtAlphaPipeline
- all
- __call__
-

149
docs/source/en/api/pipelines/pixart_sigma.md
Normal file
149
docs/source/en/api/pipelines/pixart_sigma.md
Normal file
@@ -0,0 +1,149 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# PixArt-Σ



[PixArt-Σ: Weak-to-Strong Training of Diffusion Transformer for 4K Text-to-Image Generation](https://huggingface.co/papers/2403.04692) is by Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, and Zhenguo Li.

The abstract from the paper is:

*In this paper, we introduce PixArt-Σ, a Diffusion Transformer model (DiT) capable of directly generating images at 4K resolution. PixArt-Σ represents a significant advancement over its predecessor, PixArt-α, offering images of markedly higher fidelity and improved alignment with text prompts. A key feature of PixArt-Σ is its training efficiency. Leveraging the foundational pre-training of PixArt-α, it evolves from the ‘weaker’ baseline to a ‘stronger’ model via incorporating higher quality data, a process we term “weak-to-strong training”. The advancements in PixArt-Σ are twofold: (1) High-Quality Training Data: PixArt-Σ incorporates superior-quality image data, paired with more precise and detailed image captions. (2) Efficient Token Compression: we propose a novel attention module within the DiT framework that compresses both keys and values, significantly improving efficiency and facilitating ultra-high-resolution image generation. Thanks to these improvements, PixArt-Σ achieves superior image quality and user prompt adherence capabilities with significantly smaller model size (0.6B parameters) than existing text-to-image diffusion models, such as SDXL (2.6B parameters) and SD Cascade (5.1B parameters). Moreover, PixArt-Σ’s capability to generate 4K images supports the creation of high-resolution posters and wallpapers, efficiently bolstering the production of high-quality visual content in industries such as film and gaming.*

You can find the original codebase at [PixArt-alpha/PixArt-sigma](https://github.com/PixArt-alpha/PixArt-sigma) and all the available checkpoints at [PixArt-alpha](https://huggingface.co/PixArt-alpha).

Some notes about this pipeline:

* It uses a Transformer backbone (instead of a UNet) for denoising. As such, it has a similar architecture to [DiT](https://hf.co/docs/transformers/model_doc/dit).
* It was trained using text conditions computed from T5. This aspect makes the pipeline better at following complex text prompts with intricate details.
* It is good at producing high-resolution images at different aspect ratios. To get the best results, the authors recommend some size brackets which can be found [here](https://github.com/PixArt-alpha/PixArt-sigma/blob/master/diffusion/data/datasets/utils.py); see the sketch after this list.
* It rivals the quality of state-of-the-art text-to-image generation systems (as of this writing) such as PixArt-α, Stable Diffusion XL, Playground V2.0, and DALL-E 3, while being more efficient than them.
* It can generate super high-resolution images, such as 2048px or even 4K.
* It shows that text-to-image models can grow from a weak model to a stronger one through several improvements (VAEs, datasets, and so on).

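As a quick illustration of the aspect-ratio point above, here is a minimal sketch (not part of the original documentation); the `height`/`width` values are only illustrative, and the recommended brackets are listed in the file linked in the note:

```python
import torch
from diffusers import PixArtSigmaPipeline

pipe = PixArtSigmaPipeline.from_pretrained(
    "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS", torch_dtype=torch.float16
).to("cuda")

# Landscape generation; for best results pick height/width from the recommended
# size brackets (the values below are only illustrative).
image = pipe(
    "a watercolor painting of a lighthouse on a cliff at dusk",
    height=768,
    width=1344,
).images[0]
image.save("lighthouse.png")
```
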
<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## Inference with under 8GB GPU VRAM

Run the [`PixArtSigmaPipeline`] with under 8GB GPU VRAM by loading the text encoder in 8-bit precision. Let's walk through a full-fledged example.

First, install the [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) library:

```bash
pip install -U bitsandbytes
```

Then load the text encoder in 8-bit:

```python
from transformers import T5EncoderModel
from diffusers import PixArtSigmaPipeline
import torch

text_encoder = T5EncoderModel.from_pretrained(
    "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
    subfolder="text_encoder",
    load_in_8bit=True,
    device_map="auto",
)
pipe = PixArtSigmaPipeline.from_pretrained(
    "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
    text_encoder=text_encoder,
    transformer=None,
    device_map="balanced"
)
```

Now, use the `pipe` to encode a prompt:

```python
with torch.no_grad():
    prompt = "cute cat"
    prompt_embeds, prompt_attention_mask, negative_embeds, negative_prompt_attention_mask = pipe.encode_prompt(prompt)
```

Since text embeddings have been computed, remove the `text_encoder` and `pipe` from the memory, and free up some GPU VRAM:

```python
import gc

def flush():
    gc.collect()
    torch.cuda.empty_cache()

del text_encoder
del pipe
flush()
```

Then compute the latents with the prompt embeddings as inputs:

```python
pipe = PixArtSigmaPipeline.from_pretrained(
    "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
    text_encoder=None,
    torch_dtype=torch.float16,
).to("cuda")

latents = pipe(
    negative_prompt=None,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    prompt_attention_mask=prompt_attention_mask,
    negative_prompt_attention_mask=negative_prompt_attention_mask,
    num_images_per_prompt=1,
    output_type="latent",
).images

del pipe.transformer
flush()
```

<Tip>

Notice that while initializing `pipe`, you're setting `text_encoder` to `None` so that it's not loaded.

</Tip>

Once the latents are computed, pass them to the VAE to decode into a real image:

```python
with torch.no_grad():
    image = pipe.vae.decode(latents / pipe.vae.config.scaling_factor, return_dict=False)[0]
image = pipe.image_processor.postprocess(image, output_type="pil")[0]
image.save("cat.png")
```

By deleting components you aren't using and flushing the GPU VRAM, you should be able to run [`PixArtSigmaPipeline`] with under 8GB GPU VRAM.



If you want a report of your memory usage, run this [script](https://gist.github.com/sayakpaul/3ae0f847001d342af27018a96f467e4e).

<Tip warning={true}>

Text embeddings computed in 8-bit can impact the quality of the generated images because of the information loss in the representation space caused by the reduced precision. It's recommended to compare the outputs with and without 8-bit.

</Tip>

While loading the `text_encoder`, you set `load_in_8bit` to `True`. You could also specify `load_in_4bit` to bring your memory requirements down even further to under 7GB.

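As a rough sketch of that 4-bit variant (not part of the original example), only the text encoder loading step changes; `load_in_4bit` is the corresponding `transformers`/`bitsandbytes` flag, and the rest of the workflow stays identical:

```python
from transformers import T5EncoderModel

# Hedged sketch: same checkpoint as above, but quantized to 4-bit instead of 8-bit.
text_encoder = T5EncoderModel.from_pretrained(
    "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
    subfolder="text_encoder",
    load_in_4bit=True,
    device_map="auto",
)
```
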
## PixArtSigmaPipeline

[[autodoc]] PixArtSigmaPipeline
- all
- __call__

@@ -177,7 +177,7 @@ inpaint = StableDiffusionInpaintPipeline(**text2img.components)

The Stable Diffusion pipelines are automatically supported in [Gradio](https://github.com/gradio-app/gradio/), a library that makes creating beautiful and user-friendly machine learning apps on the web a breeze. First, make sure you have Gradio installed:

```sh
pip install -U gradio
```

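For orientation, the following is a minimal sketch (not taken from the guide) of wiring a pipeline into a small Gradio demo; the checkpoint and UI layout are only illustrative:

```python
import gradio as gr
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

def generate(prompt):
    # Return the first generated image for the given prompt.
    return pipe(prompt).images[0]

gr.Interface(fn=generate, inputs="text", outputs="image").launch()
```
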
@@ -48,7 +48,7 @@ from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

import torch

repo_id = "stabilityai/stable-diffusion-2-base"
pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16, variant="fp16")

pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

@@ -72,7 +72,7 @@ init_image = load_image(img_url).resize((512, 512))

mask_image = load_image(mask_url).resize((512, 512))

repo_id = "stabilityai/stable-diffusion-2-inpainting"
pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16, variant="fp16")

pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

@@ -0,0 +1,315 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Stable Diffusion 3

Stable Diffusion 3 (SD3) was proposed in [Scaling Rectified Flow Transformers for High-Resolution Image Synthesis](https://arxiv.org/pdf/2403.03206.pdf) by Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Muller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, Dustin Podell, Tim Dockhorn, Zion English, Kyle Lacey, Alex Goodwin, Yannik Marek, and Robin Rombach.

The abstract from the paper is:

*Diffusion models create data from noise by inverting the forward paths of data towards noise and have emerged as a powerful generative modeling technique for high-dimensional, perceptual data such as images and videos. Rectified flow is a recent generative model formulation that connects data and noise in a straight line. Despite its better theoretical properties and conceptual simplicity, it is not yet decisively established as standard practice. In this work, we improve existing noise sampling techniques for training rectified flow models by biasing them towards perceptually relevant scales. Through a large-scale study, we demonstrate the superior performance of this approach compared to established diffusion formulations for high-resolution text-to-image synthesis. Additionally, we present a novel transformer-based architecture for text-to-image generation that uses separate weights for the two modalities and enables a bidirectional flow of information between image and text tokens, improving text comprehension, typography, and human preference ratings. We demonstrate that this architecture follows predictable scaling trends and correlates lower validation loss to improved text-to-image synthesis as measured by various metrics and human evaluations.*

## Usage Example

_As the model is gated, before using it with diffusers you first need to go to the [Stable Diffusion 3 Medium Hugging Face page](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers), fill in the form and accept the gate. Once you are in, you need to log in so that your system knows you’ve accepted the gate._

Use the command below to log in:

```bash
huggingface-cli login
```

<Tip>

The SD3 pipeline uses three text encoders to generate an image. Model offloading is necessary in order for it to run on most commodity hardware. Please use the `torch.float16` data type for additional memory savings.

</Tip>

```python
import torch
from diffusers import StableDiffusion3Pipeline

pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16)
pipe.to("cuda")

image = pipe(
    prompt="a photo of a cat holding a sign that says hello world",
    negative_prompt="",
    num_inference_steps=28,
    height=1024,
    width=1024,
    guidance_scale=7.0,
).images[0]

image.save("sd3_hello_world.png")
```

## Memory Optimizations for SD3

SD3 uses three text encoders, one of which is the very large T5-XXL model. This makes it challenging to run the model on GPUs with less than 24GB of VRAM, even when using `fp16` precision. The following section outlines a few memory optimizations in Diffusers that make it easier to run SD3 on low resource hardware.

### Running Inference with Model Offloading

The most basic memory optimization available in Diffusers allows you to offload the components of the model to CPU during inference in order to save memory, while seeing a slight increase in inference latency. Model offloading will only move a model component onto the GPU when it needs to be executed, while keeping the remaining components on the CPU.

```python
import torch
from diffusers import StableDiffusion3Pipeline

pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()

image = pipe(
    prompt="a photo of a cat holding a sign that says hello world",
    negative_prompt="",
    num_inference_steps=28,
    height=1024,
    width=1024,
    guidance_scale=7.0,
).images[0]

image.save("sd3_hello_world.png")
```

### Dropping the T5 Text Encoder during Inference

Removing the memory-intensive 4.7B parameter T5-XXL text encoder during inference can significantly decrease the memory requirements for SD3 with only a slight loss in performance.

```python
import torch
from diffusers import StableDiffusion3Pipeline

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    text_encoder_3=None,
    tokenizer_3=None,
    torch_dtype=torch.float16
)
pipe.to("cuda")

image = pipe(
    prompt="a photo of a cat holding a sign that says hello world",
    negative_prompt="",
    num_inference_steps=28,
    height=1024,
    width=1024,
    guidance_scale=7.0,
).images[0]

image.save("sd3_hello_world-no-T5.png")
```

### Using a Quantized Version of the T5 Text Encoder

We can leverage the `bitsandbytes` library to load and quantize the T5-XXL text encoder to 8-bit precision. This allows you to keep using all three text encoders while only slightly impacting performance.

First install the `bitsandbytes` library.

```shell
pip install bitsandbytes
```

Then load the T5-XXL model using the `BitsAndBytesConfig`.

```python
import torch
from diffusers import StableDiffusion3Pipeline
from transformers import T5EncoderModel, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_8bit=True)

model_id = "stabilityai/stable-diffusion-3-medium-diffusers"
text_encoder = T5EncoderModel.from_pretrained(
    model_id,
    subfolder="text_encoder_3",
    quantization_config=quantization_config,
)
pipe = StableDiffusion3Pipeline.from_pretrained(
    model_id,
    text_encoder_3=text_encoder,
    device_map="balanced",
    torch_dtype=torch.float16
)

image = pipe(
    prompt="a photo of a cat holding a sign that says hello world",
    negative_prompt="",
    num_inference_steps=28,
    height=1024,
    width=1024,
    guidance_scale=7.0,
).images[0]

image.save("sd3_hello_world-8bit-T5.png")
```

You can find the end-to-end script [here](https://gist.github.com/sayakpaul/82acb5976509851f2db1a83456e504f1).

## Performance Optimizations for SD3

### Using Torch Compile to Speed Up Inference

Using compiled components in the SD3 pipeline can speed up inference by as much as 4X. The following code snippet demonstrates how to compile the Transformer and VAE components of the SD3 pipeline.

```python
import torch
from diffusers import StableDiffusion3Pipeline

torch.set_float32_matmul_precision("high")

torch._inductor.config.conv_1x1_as_mm = True
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.epilogue_fusion = False
torch._inductor.config.coordinate_descent_check_all_directions = True

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    torch_dtype=torch.float16
).to("cuda")
pipe.set_progress_bar_config(disable=True)

pipe.transformer.to(memory_format=torch.channels_last)
pipe.vae.to(memory_format=torch.channels_last)

pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
pipe.vae.decode = torch.compile(pipe.vae.decode, mode="max-autotune", fullgraph=True)

# Warm Up
prompt = "a photo of a cat holding a sign that says hello world"
for _ in range(3):
    _ = pipe(prompt=prompt, generator=torch.manual_seed(1))

# Run Inference
image = pipe(prompt=prompt, generator=torch.manual_seed(1)).images[0]
image.save("sd3_hello_world.png")
```

Check out the full script [here](https://gist.github.com/sayakpaul/508d89d7aad4f454900813da5d42ca97).

## Using Long Prompts with the T5 Text Encoder

By default, the T5 Text Encoder prompt uses a maximum sequence length of `256`. This can be adjusted by setting the `max_sequence_length` to accept fewer or more tokens. Keep in mind that longer sequences require additional resources and result in longer generation times, such as during batch inference.

```python
prompt = "A whimsical and creative image depicting a hybrid creature that is a mix of a waffle and a hippopotamus, basking in a river of melted butter amidst a breakfast-themed landscape. It features the distinctive, bulky body shape of a hippo. However, instead of the usual grey skin, the creature’s body resembles a golden-brown, crispy waffle fresh off the griddle. The skin is textured with the familiar grid pattern of a waffle, each square filled with a glistening sheen of syrup. The environment combines the natural habitat of a hippo with elements of a breakfast table setting, a river of warm, melted butter, with oversized utensils or plates peeking out from the lush, pancake-like foliage in the background, a towering pepper mill standing in for a tree. As the sun rises in this fantastical world, it casts a warm, buttery glow over the scene. The creature, content in its butter river, lets out a yawn. Nearby, a flock of birds take flight"

image = pipe(
    prompt=prompt,
    negative_prompt="",
    num_inference_steps=28,
    guidance_scale=4.5,
    max_sequence_length=512,
).images[0]
```

### Sending a different prompt to the T5 Text Encoder

You can send a different prompt to the CLIP Text Encoders and the T5 Text Encoder to prevent the prompt from being truncated by the CLIP Text Encoders and to improve generation.

<Tip>

The prompt with the CLIP Text Encoders is still truncated to the 77 token limit.

</Tip>

```python
prompt = "A whimsical and creative image depicting a hybrid creature that is a mix of a waffle and a hippopotamus, basking in a river of melted butter amidst a breakfast-themed landscape. A river of warm, melted butter, pancake-like foliage in the background, a towering pepper mill standing in for a tree."

prompt_3 = "A whimsical and creative image depicting a hybrid creature that is a mix of a waffle and a hippopotamus, basking in a river of melted butter amidst a breakfast-themed landscape. It features the distinctive, bulky body shape of a hippo. However, instead of the usual grey skin, the creature’s body resembles a golden-brown, crispy waffle fresh off the griddle. The skin is textured with the familiar grid pattern of a waffle, each square filled with a glistening sheen of syrup. The environment combines the natural habitat of a hippo with elements of a breakfast table setting, a river of warm, melted butter, with oversized utensils or plates peeking out from the lush, pancake-like foliage in the background, a towering pepper mill standing in for a tree. As the sun rises in this fantastical world, it casts a warm, buttery glow over the scene. The creature, content in its butter river, lets out a yawn. Nearby, a flock of birds take flight"

image = pipe(
    prompt=prompt,
    prompt_3=prompt_3,
    negative_prompt="",
    num_inference_steps=28,
    guidance_scale=4.5,
    max_sequence_length=512,
).images[0]
```

## Tiny AutoEncoder for Stable Diffusion 3

Tiny AutoEncoder for Stable Diffusion (TAESD3) is a tiny distilled version of Stable Diffusion 3's VAE by [Ollin Boer Bohan](https://github.com/madebyollin/taesd) that can decode [`StableDiffusion3Pipeline`] latents almost instantly.

To use with Stable Diffusion 3:

```python
import torch
from diffusers import StableDiffusion3Pipeline, AutoencoderTiny

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd3", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(prompt, num_inference_steps=25).images[0]
image.save("cheesecake.png")
```

## Loading the original checkpoints via `from_single_file`

The `SD3Transformer2DModel` and `StableDiffusion3Pipeline` classes support loading the original checkpoints via the `from_single_file` method. This method allows you to load the original checkpoint files that were used to train the models.

## Loading the original checkpoints for the `SD3Transformer2DModel`

```python
from diffusers import SD3Transformer2DModel

model = SD3Transformer2DModel.from_single_file("https://huggingface.co/stabilityai/stable-diffusion-3-medium/blob/main/sd3_medium.safetensors")
```

## Loading the single checkpoint for the `StableDiffusion3Pipeline`

### Loading the single file checkpoint without T5

```python
import torch
from diffusers import StableDiffusion3Pipeline

pipe = StableDiffusion3Pipeline.from_single_file(
    "https://huggingface.co/stabilityai/stable-diffusion-3-medium/blob/main/sd3_medium_incl_clips.safetensors",
    torch_dtype=torch.float16,
    text_encoder_3=None
)
pipe.enable_model_cpu_offload()

image = pipe("a picture of a cat holding a sign that says hello world").images[0]
image.save('sd3-single-file.png')
```

### Loading the single file checkpoint with T5

> [!TIP]
> The following example loads a checkpoint stored in an 8-bit floating point format which requires PyTorch 2.3 or later.

```python
import torch
from diffusers import StableDiffusion3Pipeline

pipe = StableDiffusion3Pipeline.from_single_file(
    "https://huggingface.co/stabilityai/stable-diffusion-3-medium/blob/main/sd3_medium_incl_clips_t5xxlfp8.safetensors",
    torch_dtype=torch.float16,
)
pipe.enable_model_cpu_offload()

image = pipe("a picture of a cat holding a sign that says hello world").images[0]
image.save('sd3-single-file-t5-fp8.png')
```

## StableDiffusion3Pipeline

[[autodoc]] StableDiffusion3Pipeline
- all
- __call__

@@ -12,7 +12,7 @@ specific language governing permissions and limitations under the License.

# EDMDPMSolverMultistepScheduler

`EDMDPMSolverMultistepScheduler` is a [Karras formulation](https://huggingface.co/papers/2206.00364) of `DPMSolverMultistepScheduler`, a multistep scheduler from [DPM-Solver: A Fast ODE Solver for Diffusion Probabilistic Model Sampling in Around 10 Steps](https://huggingface.co/papers/2206.00927) and [DPM-Solver++: Fast Solver for Guided Sampling of Diffusion Probabilistic Models](https://huggingface.co/papers/2211.01095) by Cheng Lu, Yuhao Zhou, Fan Bao, Jianfei Chen, Chongxuan Li, and Jun Zhu.

DPMSolver (and the improved version DPMSolver++) is a fast dedicated high-order solver for diffusion ODEs with a convergence order guarantee. Empirically, DPMSolver sampling with only 20 steps can generate high-quality
samples, and it can generate quite good samples even in 10 steps.

docs/source/en/api/schedulers/flow_match_euler_discrete.md (new file, 18 lines)
@@ -0,0 +1,18 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# FlowMatchEulerDiscreteScheduler

`FlowMatchEulerDiscreteScheduler` is based on the flow-matching sampling introduced in [Stable Diffusion 3](https://arxiv.org/abs/2403.03206).

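As a minimal sketch (not part of the original page), the scheduler can be instantiated directly to inspect the flow-matching timesteps and sigmas it produces; the `shift=3.0` value is an assumption mirroring the SD3 configuration:

```python
from diffusers import FlowMatchEulerDiscreteScheduler

scheduler = FlowMatchEulerDiscreteScheduler(shift=3.0)
scheduler.set_timesteps(num_inference_steps=28)

# Flow-matching "timesteps" are the continuous sigmas mapped onto the training range.
print(scheduler.timesteps[:5])
print(scheduler.sigmas[:5])
```
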
## FlowMatchEulerDiscreteScheduler

[[autodoc]] FlowMatchEulerDiscreteScheduler

@@ -12,7 +12,7 @@ specific language governing permissions and limitations under the License.

# DPMSolverMultistepScheduler

`DPMSolverMultistepScheduler` is a multistep scheduler from [DPM-Solver: A Fast ODE Solver for Diffusion Probabilistic Model Sampling in Around 10 Steps](https://huggingface.co/papers/2206.00927) and [DPM-Solver++: Fast Solver for Guided Sampling of Diffusion Probabilistic Models](https://huggingface.co/papers/2211.01095) by Cheng Lu, Yuhao Zhou, Fan Bao, Jianfei Chen, Chongxuan Li, and Jun Zhu.

DPMSolver (and the improved version DPMSolver++) is a fast dedicated high-order solver for diffusion ODEs with a convergence order guarantee. Empirically, DPMSolver sampling with only 20 steps can generate high-quality
samples, and it can generate quite good samples even in 10 steps.

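To ground the 20-step claim, here is a minimal sketch (not part of the scheduler page) that swaps `DPMSolverMultistepScheduler` into an existing pipeline; the model id is illustrative:

```python
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

# Swap in the multistep DPM-Solver and sample in roughly 20 steps.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse on the moon", num_inference_steps=20).images[0]
```
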
@@ -37,3 +37,7 @@ Utility and helper functions for working with 🤗 Diffusers.

## make_image_grid

[[autodoc]] utils.make_image_grid

## randn_tensor

[[autodoc]] utils.torch_utils.randn_tensor

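A minimal sketch (not part of the original page) of the two utilities documented above:

```python
import torch
from PIL import Image
from diffusers.utils import make_image_grid
from diffusers.utils.torch_utils import randn_tensor

# Reproducible Gaussian latents with an explicit shape, generator, and dtype.
latents = randn_tensor((1, 4, 64, 64), generator=torch.manual_seed(0), dtype=torch.float32)

# Arrange a list of PIL images into a 2x2 grid (blank images stand in for pipeline outputs).
images = [Image.new("RGB", (64, 64)) for _ in range(4)]
grid = make_image_grid(images, rows=2, cols=2)
```
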
docs/source/en/api/video_processor.md (new file, 21 lines)
@@ -0,0 +1,21 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Video Processor

The [`VideoProcessor`] provides a unified API for video pipelines to prepare inputs for VAE encoding and post-processing outputs once they're decoded. The class inherits [`VaeImageProcessor`] so it includes transformations such as resizing, normalization, and conversion between PIL Image, PyTorch, and NumPy arrays.

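The snippet below is a hedged sketch (not from the original page); it assumes the constructor mirrors [`VaeImageProcessor`] and that a single video can be passed as a list of PIL frames:

```python
import numpy as np
from PIL import Image
from diffusers.video_processor import VideoProcessor

video_processor = VideoProcessor(vae_scale_factor=8)

# A dummy 16-frame video as a list of PIL images (stand-in for real frames).
frames = [Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8)) for _ in range(16)]

# To a 5D torch tensor scaled to [-1, 1], ready for VAE encoding...
video_tensor = video_processor.preprocess_video(frames, height=256, width=256)

# ...and back to PIL frames after decoding.
pil_frames = video_processor.postprocess_video(video_tensor, output_type="pil")[0]
```
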
## VideoProcessor

[[autodoc]] video_processor.VideoProcessor.preprocess_video

[[autodoc]] video_processor.VideoProcessor.postprocess_video

@@ -22,14 +22,13 @@ We enormously value feedback from the community, so please do not be afraid to s

## Overview

You can contribute in many ways ranging from answering questions on issues and discussions to adding new diffusion models to the core library.

In the following, we give an overview of different ways to contribute, ranked by difficulty in ascending order. All of them are valuable to the community.

* 1. Asking and answering questions on [the Diffusers discussion forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers) or on [Discord](https://discord.gg/G7tWnz98XR).
* 2. Opening new issues on [the GitHub Issues tab](https://github.com/huggingface/diffusers/issues/new/choose) or new discussions on [the GitHub Discussions tab](https://github.com/huggingface/diffusers/discussions/new/choose).
* 3. Answering issues on [the GitHub Issues tab](https://github.com/huggingface/diffusers/issues) or discussions on [the GitHub Discussions tab](https://github.com/huggingface/diffusers/discussions).
* 4. Fix a simple issue, marked by the "Good first issue" label, see [here](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22).
* 5. Contribute to the [documentation](https://github.com/huggingface/diffusers/tree/main/docs/source).
* 6. Contribute a [Community Pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3Acommunity-examples).

@@ -63,7 +62,7 @@ In the same spirit, you are of immense help to the community by answering such q

**Please** keep in mind that the more effort you put into asking or answering a question, the higher
the quality of the publicly documented knowledge. In the same way, well-posed and well-answered questions create a high-quality knowledge database accessible to everybody, while badly posed questions or answers reduce the overall quality of the public knowledge database.
In short, a high quality question or answer is *precise*, *concise*, *relevant*, *easy-to-understand*, *accessible*, and *well-formatted/well-posed*. For more information, please have a look through the [How to write a good issue](#how-to-write-a-good-issue) section.

**NOTE about channels**:
[*The forum*](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) is much better indexed by search engines, such as Google. Posts are ranked by popularity rather than chronologically. Hence, it's easier to look up questions and answers that we posted some time ago.

@@ -99,7 +98,7 @@ This means in more detail:

- Format your code.
- Do not include any external libraries except for Diffusers depending on them.
- **Always** provide all necessary information about your environment; for this, you can run: `diffusers-cli env` in your shell and copy-paste the displayed information to the issue.
- Explain the issue. If the reader doesn't know what the issue is and why it is an issue, (s)he cannot solve it.
- **Always** make sure the reader can reproduce your issue with as little effort as possible. If your code snippet cannot be run because of missing libraries or undefined variables, the reader cannot help you. Make sure your reproducible code snippet is as minimal as possible and can be copy-pasted into a simple Python shell.
- If in order to reproduce your issue a model and/or dataset is required, make sure the reader has access to that model or dataset. You can always upload your model or dataset to the [Hub](https://huggingface.co) to make it easily downloadable. Try to keep your model and dataset as small as possible, to make the reproduction of your issue as effortless as possible.

@@ -198,38 +197,81 @@ Anything displayed on [the official Diffusers doc page](https://huggingface.co/d

Please have a look at [this page](https://github.com/huggingface/diffusers/tree/main/docs) on how to verify changes made to the documentation locally.

### 6. Contribute a community pipeline

> [!TIP]
> Read the [Community pipelines](../using-diffusers/custom_pipeline_overview#community-pipelines) guide to learn more about the difference between a GitHub and Hugging Face Hub community pipeline. If you're interested in why we have community pipelines, take a look at GitHub Issue [#841](https://github.com/huggingface/diffusers/issues/841) (basically, we can't maintain all the possible ways diffusion models can be used for inference but we also don't want to prevent the community from building them).

Contributing a community pipeline is a great way to share your creativity and work with the community. It lets you build on top of the [`DiffusionPipeline`] so that anyone can load and use it by setting the `custom_pipeline` parameter. This section will walk you through how to create a simple pipeline where the UNet only does a single forward pass and calls the scheduler once (a "one-step" pipeline).

1. Create a one_step_unet.py file for your community pipeline. This file can contain whatever package you want to use as long as it's installed by the user. Make sure you only have one pipeline class that inherits from [`DiffusionPipeline`] to load model weights and the scheduler configuration from the Hub. Add a UNet and scheduler to the `__init__` function.

You should also add the `register_modules` function to ensure your pipeline and its components can be saved with [`~DiffusionPipeline.save_pretrained`].

```py
from diffusers import DiffusionPipeline
import torch


class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler)
```

2. In the forward pass (which we recommend defining as `__call__`), you can add any feature you'd like. For the "one-step" pipeline, create a random image and call the UNet and scheduler once by setting `timestep=1`.

```py
from diffusers import DiffusionPipeline
import torch


class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        return scheduler_output
```

Now you can run the pipeline by passing a UNet and scheduler to it or load pretrained weights if the pipeline structure is identical.

```py
from diffusers import DDPMScheduler, UNet2DModel

scheduler = DDPMScheduler()
unet = UNet2DModel()

pipeline = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)
output = pipeline()
# load pretrained weights
pipeline = UnetSchedulerOneForwardPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True)
output = pipeline()
```

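As a small, hedged addition (not part of the original guide), the `register_modules` call above is also what makes the pipeline serializable, so a local round trip could look like:

```py
# Sketch only: save the locally constructed pipeline and load it back from disk.
pipeline = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)
pipeline.save_pretrained("one-step-unet")

restored = UnetSchedulerOneForwardPipeline.from_pretrained("one-step-unet")
output = restored()
```
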
You can either share your pipeline as a GitHub community pipeline or Hub community pipeline.

<hfoptions id="pipeline type">
<hfoption id="GitHub pipeline">

Share your GitHub pipeline by opening a pull request on the Diffusers [repository](https://github.com/huggingface/diffusers) and adding the one_step_unet.py file to the [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) subfolder.

</hfoption>
<hfoption id="Hub pipeline">

Share your Hub pipeline by creating a model repository on the Hub and uploading the one_step_unet.py file to it.

</hfoption>
</hfoptions>

### 7. Contribute to training examples

@@ -245,7 +287,7 @@ The official training examples are maintained by the Diffusers' core maintainers

This is because of the same reasons put forward in [6. Contribute a community pipeline](#6-contribute-a-community-pipeline) for official pipelines vs. community pipelines: It is not feasible for the core maintainers to maintain all possible training methods for diffusion models.
If the Diffusers core maintainers and the community consider a certain training paradigm to be too experimental or not popular enough, the corresponding training code should be put in the `research_projects` folder and maintained by the author.

Both official training and research examples consist of a directory that contains one or more training scripts, a `requirements.txt` file, and a `README.md` file. In order for the user to make use of the
training examples, it is required to clone the repository:

```bash

@@ -255,7 +297,8 @@ git clone https://github.com/huggingface/diffusers

as well as to install all additional dependencies required for training:

```bash
cd diffusers
pip install -r examples/<your-example-folder>/requirements.txt
```

Therefore when adding an example, the `requirements.txt` file shall define all pip dependencies required for your training example so that once all those are installed, the user can run the example's training script. See, for example, the [DreamBooth `requirements.txt` file](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/requirements.txt).

@@ -273,7 +316,7 @@ Once an example script works, please make sure to add a comprehensive `README.md

- A link to some training results (logs, models, etc.) that show what the user can expect as shown [here](https://api.wandb.ai/report/patrickvonplaten/xm6cd5q5).
- If you are adding a non-official/research training example, **please don't forget** to add a sentence that you are maintaining this training example which includes your git handle as shown [here](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/intel_opts#diffusers-examples-with-intel-optimizations).

If you are contributing to the official training examples, please also make sure to add a test to its folder such as [examples/dreambooth/test_dreambooth.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/test_dreambooth.py). This is not necessary for non-official training examples.

### 8. Fixing a "Good second issue"

@@ -375,7 +418,7 @@ You will need basic `git` proficiency to be able to contribute to

manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro
Git](https://git-scm.com/book/en/v2) is a very good reference.

Follow these steps to start contributing ([supported Python versions](https://github.com/huggingface/diffusers/blob/83bc6c94eaeb6f7704a2a428931cf2d9ad973ae9/setup.py#L270)):

1. Fork the [repository](https://github.com/huggingface/diffusers) by
clicking on the 'Fork' button on the repository's page. This creates a copy of the code

@@ -63,14 +63,14 @@ Let's walk through more in-detail design decisions for each class.
|
|||||||
Pipelines are designed to be easy to use (therefore do not follow [*Simple over easy*](#simple-over-easy) 100%), are not feature complete, and should loosely be seen as examples of how to use [models](#models) and [schedulers](#schedulers) for inference.
|
Pipelines are designed to be easy to use (therefore do not follow [*Simple over easy*](#simple-over-easy) 100%), are not feature complete, and should loosely be seen as examples of how to use [models](#models) and [schedulers](#schedulers) for inference.
|
||||||
|
|
||||||
The following design principles are followed:
|
The following design principles are followed:
|
||||||
- Pipelines follow the single-file policy. All pipelines can be found in individual directories under src/diffusers/pipelines. One pipeline folder corresponds to one diffusion paper/project/release. Multiple pipeline files can be gathered in one pipeline folder, as it’s done for [`src/diffusers/pipelines/stable-diffusion`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/stable_diffusion). If pipelines share similar functionality, one can make use of the [#Copied from mechanism](https://github.com/huggingface/diffusers/blob/125d783076e5bd9785beb05367a2d2566843a271/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L251).
|
- Pipelines follow the single-file policy. All pipelines can be found in individual directories under src/diffusers/pipelines. One pipeline folder corresponds to one diffusion paper/project/release. Multiple pipeline files can be gathered in one pipeline folder, as it’s done for [`src/diffusers/pipelines/stable-diffusion`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/stable_diffusion). If pipelines share similar functionality, one can make use of the [# Copied from mechanism](https://github.com/huggingface/diffusers/blob/125d783076e5bd9785beb05367a2d2566843a271/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L251).
|
||||||
- Pipelines all inherit from [`DiffusionPipeline`].
|
- Pipelines all inherit from [`DiffusionPipeline`].
|
||||||
- Every pipeline consists of different model and scheduler components, that are documented in the [`model_index.json` file](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/model_index.json), are accessible under the same name as attributes of the pipeline and can be shared between pipelines with [`DiffusionPipeline.components`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.components) function.
|
- Every pipeline consists of different model and scheduler components, that are documented in the [`model_index.json` file](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/model_index.json), are accessible under the same name as attributes of the pipeline and can be shared between pipelines with [`DiffusionPipeline.components`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.components) function.
|
||||||
- Every pipeline should be loadable via the [`DiffusionPipeline.from_pretrained`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained) function.
|
- Every pipeline should be loadable via the [`DiffusionPipeline.from_pretrained`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained) function.
|
||||||
- Pipelines should be used **only** for inference.
|
- Pipelines should be used **only** for inference.
|
||||||
- Pipelines should be very readable, self-explanatory, and easy to tweak.
|
- Pipelines should be very readable, self-explanatory, and easy to tweak.
|
||||||
- Pipelines should be designed to build on top of each other and be easy to integrate into higher-level APIs.
|
- Pipelines should be designed to build on top of each other and be easy to integrate into higher-level APIs.
|
||||||
- Pipelines are **not** intended to be feature-complete user interfaces. For future complete user interfaces one should rather have a look at [InvokeAI](https://github.com/invoke-ai/InvokeAI), [Diffuzers](https://github.com/abhishekkrthakur/diffuzers), and [lama-cleaner](https://github.com/Sanster/lama-cleaner).
|
- Pipelines are **not** intended to be feature-complete user interfaces. For feature-complete user interfaces one should rather have a look at [InvokeAI](https://github.com/invoke-ai/InvokeAI), [Diffuzers](https://github.com/abhishekkrthakur/diffuzers), and [lama-cleaner](https://github.com/Sanster/lama-cleaner).
|
||||||
- Every pipeline should have one and only one way to run it via a `__call__` method. The naming of the `__call__` arguments should be shared across all pipelines.
|
- Every pipeline should have one and only one way to run it via a `__call__` method. The naming of the `__call__` arguments should be shared across all pipelines.
|
||||||
- Pipelines should be named after the task they are intended to solve.
|
- Pipelines should be named after the task they are intended to solve.
|
||||||
- In almost all cases, novel diffusion pipelines shall be implemented in a new pipeline folder/file.
|
- In almost all cases, novel diffusion pipelines shall be implemented in a new pipeline folder/file.
|
||||||
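As a concrete illustration of the component-sharing principle noted above, here is a minimal sketch that builds an img2img pipeline from the components of a text-to-image pipeline without loading the weights twice. `StableDiffusionPipeline`, `StableDiffusionImg2ImgPipeline`, and `DiffusionPipeline.components` are existing diffusers APIs; the Stable Diffusion v1-5 checkpoint is only assumed for illustration.

```py
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline

# Load a text-to-image pipeline once...
text2img = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", use_safetensors=True
)

# ...and reuse the exact same model and scheduler components for img2img.
# `components` returns the pipeline's init arguments, so no weights are reloaded.
img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
```

Both pipelines then point at the same underlying modules, so memory is shared rather than duplicated.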
@@ -81,7 +81,7 @@ Models are designed as configurable toolboxes that are natural extensions of [Py
|
|||||||
|
|
||||||
The following design principles are followed:
|
The following design principles are followed:
|
||||||
- Models correspond to **a type of model architecture**. *E.g.* the [`UNet2DConditionModel`] class is used for all UNet variations that expect 2D image inputs and are conditioned on some context.
|
- Models correspond to **a type of model architecture**. *E.g.* the [`UNet2DConditionModel`] class is used for all UNet variations that expect 2D image inputs and are conditioned on some context.
|
||||||
- All models can be found in [`src/diffusers/models`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models) and every model architecture shall be defined in its file, e.g. [`unet_2d_condition.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py), [`transformer_2d.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/transformer_2d.py), etc...
|
- All models can be found in [`src/diffusers/models`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models) and every model architecture shall be defined in its own file, e.g. [`unets/unet_2d_condition.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unets/unet_2d_condition.py), [`transformers/transformer_2d.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/transformers/transformer_2d.py), etc...
|
||||||
- Models **do not** follow the single-file policy and should make use of smaller model building blocks, such as [`attention.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py), [`resnet.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/resnet.py), [`embeddings.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/embeddings.py), etc... **Note**: This is in stark contrast to Transformers' modeling files and shows that models do not really follow the single-file policy.
|
- Models **do not** follow the single-file policy and should make use of smaller model building blocks, such as [`attention.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py), [`resnet.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/resnet.py), [`embeddings.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/embeddings.py), etc... **Note**: This is in stark contrast to Transformers' modeling files and shows that models do not really follow the single-file policy.
|
||||||
- Models intend to expose complexity, just like PyTorch's `Module` class, and give clear error messages.
|
- Models intend to expose complexity, just like PyTorch's `Module` class, and give clear error messages.
|
||||||
- Models all inherit from `ModelMixin` and `ConfigMixin`.
|
- Models all inherit from `ModelMixin` and `ConfigMixin` (see the sketch after this list).
|
||||||
@@ -90,7 +90,7 @@ The following design principles are followed:
|
|||||||
- To integrate new model checkpoints whose general architecture can be classified as an architecture that already exists in Diffusers, the existing model architecture shall be adapted to make it work with the new checkpoint. One should only create a new file if the model architecture is fundamentally different.
|
- To integrate new model checkpoints whose general architecture can be classified as an architecture that already exists in Diffusers, the existing model architecture shall be adapted to make it work with the new checkpoint. One should only create a new file if the model architecture is fundamentally different.
|
||||||
- Models should be designed to be easily extendable to future changes. This can be achieved by limiting public function arguments, configuration arguments, and "foreseeing" future changes, *e.g.* it is usually better to add `string` "...type" arguments that can easily be extended to new future types instead of boolean `is_..._type` arguments. Only the minimum amount of changes shall be made to existing architectures to make a new model checkpoint work.
|
- Models should be designed to be easily extendable to future changes. This can be achieved by limiting public function arguments, configuration arguments, and "foreseeing" future changes, *e.g.* it is usually better to add `string` "...type" arguments that can easily be extended to new future types instead of boolean `is_..._type` arguments. Only the minimum amount of changes shall be made to existing architectures to make a new model checkpoint work.
|
||||||
- The model design is a difficult trade-off between keeping code readable and concise and supporting many model checkpoints. For most parts of the modeling code, classes shall be adapted for new model checkpoints, while there are some exceptions where it is preferred to add new classes to make sure the code is kept concise and
|
- The model design is a difficult trade-off between keeping code readable and concise and supporting many model checkpoints. For most parts of the modeling code, classes shall be adapted for new model checkpoints, while there are some exceptions where it is preferred to add new classes to make sure the code is kept concise and
|
||||||
readable long-term, such as [UNet blocks](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py) and [Attention processors](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
readable long-term, such as [UNet blocks](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unets/unet_2d_blocks.py) and [Attention processors](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
||||||
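A minimal sketch of the `ModelMixin`/`ConfigMixin` pattern referenced in the list above; `ToyModel` and the `"toy-model"` directory are hypothetical and only illustrate how `@register_to_config` records the init arguments so the model can be saved and reloaded with the standard `save_pretrained`/`from_pretrained` methods.

```py
import torch
from torch import nn

from diffusers import ConfigMixin, ModelMixin
from diffusers.configuration_utils import register_to_config


class ToyModel(ModelMixin, ConfigMixin):
    # `register_to_config` stores the init arguments in the model's config
    @register_to_config
    def __init__(self, hidden_dim: int = 32):
        super().__init__()
        self.proj = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)


model = ToyModel(hidden_dim=64)
model.save_pretrained("toy-model")                # writes config + weights
reloaded = ToyModel.from_pretrained("toy-model")  # restores the same config
```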
|
|
||||||
### Schedulers
|
### Schedulers
|
||||||
|
|
||||||
@@ -100,9 +100,9 @@ The following design principles are followed:
|
|||||||
- All schedulers are found in [`src/diffusers/schedulers`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers).
|
- All schedulers are found in [`src/diffusers/schedulers`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers).
|
||||||
- Schedulers are **not** allowed to import from large utils files and shall be kept very self-contained.
|
- Schedulers are **not** allowed to import from large utils files and shall be kept very self-contained.
|
||||||
- One scheduler Python file corresponds to one scheduler algorithm (as might be defined in a paper).
|
- One scheduler Python file corresponds to one scheduler algorithm (as might be defined in a paper).
|
||||||
- If schedulers share similar functionalities, we can make use of the `#Copied from` mechanism.
|
- If schedulers share similar functionalities, we can make use of the `# Copied from` mechanism.
|
||||||
- Schedulers all inherit from `SchedulerMixin` and `ConfigMixin`.
|
- Schedulers all inherit from `SchedulerMixin` and `ConfigMixin`.
|
||||||
- Schedulers can be easily swapped out with the [`ConfigMixin.from_config`](https://huggingface.co/docs/diffusers/main/en/api/configuration#diffusers.ConfigMixin.from_config) method as explained in detail [here](../using-diffusers/schedulers.md).
|
- Schedulers can be easily swapped out with the [`ConfigMixin.from_config`](https://huggingface.co/docs/diffusers/main/en/api/configuration#diffusers.ConfigMixin.from_config) method as explained in detail [here](../using-diffusers/schedulers).
|
||||||
- Every scheduler has to have a `set_num_inference_steps`, and a `step` function. `set_num_inference_steps(...)` has to be called before every denoising process, *i.e.* before `step(...)` is called.
|
- Every scheduler has to have a `set_num_inference_steps` and a `step` function. `set_num_inference_steps(...)` has to be called before every denoising process, *i.e.* before `step(...)` is called.
|
||||||
- Every scheduler exposes the timesteps to be "looped over" via a `timesteps` attribute, which is an array of timesteps the model will be called upon.
|
- Every scheduler exposes the timesteps to be "looped over" via a `timesteps` attribute, which is an array of timesteps the model will be called upon.
|
||||||
- The `step(...)` function takes a predicted model output and the "current" sample (x_t) and returns the "previous", slightly more denoised sample (x_t-1).
|
- The `step(...)` function takes a predicted model output and the "current" sample (x_t) and returns the "previous", slightly more denoised sample (x_t-1).
|
||||||
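To make the scheduler contract above concrete, here is a minimal sketch that swaps one scheduler for another via `ConfigMixin.from_config`, sets the number of inference steps (exposed as `set_timesteps` in current diffusers releases), and walks the `timesteps` attribute with `step(...)` from x_t to x_t-1. The random tensors stand in for a real model prediction and are only for illustration.

```py
import torch
from diffusers import DDPMScheduler, DPMSolverMultistepScheduler

# Swap one scheduler for another from its config (ConfigMixin.from_config)
ddpm = DDPMScheduler(num_train_timesteps=1000)
scheduler = DPMSolverMultistepScheduler.from_config(ddpm.config)

scheduler.set_timesteps(25)                      # number of denoising steps
sample = torch.randn(1, 3, 32, 32)               # stand-in for a noisy sample x_T

for t in scheduler.timesteps:                    # the exposed `timesteps` attribute
    model_output = torch.randn_like(sample)      # stand-in for a UNet noise prediction
    sample = scheduler.step(model_output, t, sample).prev_sample  # x_t -> x_t-1
```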
|
|||||||
@@ -112,7 +112,7 @@ pip install -e ".[flax]"
|
|||||||
|
|
||||||
These commands will link the folder you cloned the repository to and your Python library paths.
|
These commands will link the folder you cloned the repository to and your Python library paths.
|
||||||
Python will now look inside the folder you cloned to in addition to the normal library paths.
|
Python will now look inside the folder you cloned to in addition to the normal library paths.
|
||||||
For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.8/site-packages/`, Python will also search the `~/diffusers/` folder you cloned to.
|
For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.10/site-packages/`, Python will also search the `~/diffusers/` folder you cloned to.
|
||||||
|
|
||||||
<Tip warning={true}>
|
<Tip warning={true}>
|
||||||
|
|
||||||
|
|||||||
@@ -12,27 +12,23 @@ specific language governing permissions and limitations under the License.
|
|||||||
|
|
||||||
# Speed up inference
|
# Speed up inference
|
||||||
|
|
||||||
There are several ways to optimize 🤗 Diffusers for inference speed. As a general rule of thumb, we recommend using either [xFormers](xformers) or `torch.nn.functional.scaled_dot_product_attention` in PyTorch 2.0 for their memory-efficient attention.
|
There are several ways to optimize Diffusers for inference speed, such as reducing the computational burden by lowering the data precision or using a lightweight distilled model. There are also memory-efficient attention implementations, [xFormers](xformers) and [scaled dot product attention](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) in PyTorch 2.0, that reduce memory usage which also indirectly speeds up inference. Different speed optimizations can be stacked together to get the fastest inference times.
|
||||||
|
|
||||||
<Tip>
|
> [!TIP]
|
||||||
|
> Optimizing for inference speed or reduced memory usage can lead to improved performance in the other category, so you should try to optimize for both whenever you can. This guide focuses on inference speed, but you can learn more about lowering memory usage in the [Reduce memory usage](memory) guide.
|
||||||
|
|
||||||
In many cases, optimizing for speed or memory leads to improved performance in the other, so you should try to optimize for both whenever you can. This guide focuses on inference speed, but you can learn more about preserving memory in the [Reduce memory usage](memory) guide.
|
The inference times below are obtained from generating a single 512x512 image from the prompt "a photo of an astronaut riding a horse on mars" with 50 DDIM steps on a NVIDIA A100.
|
||||||
|
|
||||||
</Tip>
|
| setup | latency | speed-up |
|
||||||
|
|----------|---------|----------|
|
||||||
|
| baseline | 5.27s | x1 |
|
||||||
|
| tf32 | 4.14s | x1.27 |
|
||||||
|
| fp16 | 3.51s | x1.50 |
|
||||||
|
| combined | 3.41s | x1.54 |
|
||||||
|
|
||||||
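The "combined" row stacks the two precision settings covered in the next sections. Below is a short sketch, assuming the Stable Diffusion v1-5 checkpoint used elsewhere in these guides, of what that stacking looks like in practice; tf32 and fp16 are each explained right after.

```py
import torch
from diffusers import DiffusionPipeline

torch.backends.cuda.matmul.allow_tf32 = True     # tf32 matmuls (see TensorFloat-32 below)

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")                                     # fp16 weights (see Half-precision weights below)

image = pipe("a photo of an astronaut riding a horse on mars", num_inference_steps=50).images[0]
```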
The results below are obtained from generating a single 512x512 image from the prompt `a photo of an astronaut riding a horse on mars` with 50 DDIM steps on a Nvidia Titan RTX, demonstrating the speed-up you can expect.
|
## TensorFloat-32
|
||||||
|
|
||||||
| | latency | speed-up |
|
On Ampere and later CUDA devices, matrix multiplications and convolutions can use the [TensorFloat-32 (tf32)](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) mode for faster, but slightly less accurate computations. By default, PyTorch enables tf32 mode for convolutions but not matrix multiplications. Unless your network requires full float32 precision, we recommend enabling tf32 for matrix multiplications. It can significantly speed up computations with typically negligible loss in numerical accuracy.
|
||||||
| ---------------- | ------- | ------- |
|
|
||||||
| original | 9.50s | x1 |
|
|
||||||
| fp16 | 3.61s | x2.63 |
|
|
||||||
| channels last | 3.30s | x2.88 |
|
|
||||||
| traced UNet | 3.21s | x2.96 |
|
|
||||||
| memory efficient attention | 2.63s | x3.61 |
|
|
||||||
|
|
||||||
## Use TensorFloat-32
|
|
||||||
|
|
||||||
On Ampere and later CUDA devices, matrix multiplications and convolutions can use the [TensorFloat-32 (TF32)](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) mode for faster, but slightly less accurate computations. By default, PyTorch enables TF32 mode for convolutions but not matrix multiplications. Unless your network requires full float32 precision, we recommend enabling TF32 for matrix multiplications. It can significantly speed up computations with typically negligible loss in numerical accuracy.
|
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import torch
|
import torch
|
||||||
@@ -40,11 +36,11 @@ import torch
|
|||||||
torch.backends.cuda.matmul.allow_tf32 = True
|
torch.backends.cuda.matmul.allow_tf32 = True
|
||||||
```
|
```
|
||||||
|
|
||||||
You can learn more about TF32 in the [Mixed precision training](https://huggingface.co/docs/transformers/en/perf_train_gpu_one#tf32) guide.
|
Learn more about tf32 in the [Mixed precision training](https://huggingface.co/docs/transformers/en/perf_train_gpu_one#tf32) guide.
|
||||||
|
|
||||||
## Half-precision weights
|
## Half-precision weights
|
||||||
|
|
||||||
To save GPU memory and get more speed, try loading and running the model weights directly in half-precision or float16:
|
To save GPU memory and get more speed, set `torch_dtype=torch.float16` to load and run the model weights directly in half-precision.
|
||||||
|
|
||||||
```Python
|
```Python
|
||||||
import torch
|
import torch
|
||||||
@@ -56,19 +52,76 @@ pipe = DiffusionPipeline.from_pretrained(
|
|||||||
use_safetensors=True,
|
use_safetensors=True,
|
||||||
)
|
)
|
||||||
pipe = pipe.to("cuda")
|
pipe = pipe.to("cuda")
|
||||||
|
|
||||||
prompt = "a photo of an astronaut riding a horse on mars"
|
|
||||||
image = pipe(prompt).images[0]
|
|
||||||
```
|
```
|
||||||
|
|
||||||
<Tip warning={true}>
|
> [!WARNING]
|
||||||
|
> Don't use [torch.autocast](https://pytorch.org/docs/stable/amp.html#torch.autocast) in any of the pipelines as it can lead to black images and is always slower than pure float16 precision.
|
||||||
Don't use [`torch.autocast`](https://pytorch.org/docs/stable/amp.html#torch.autocast) in any of the pipelines as it can lead to black images and is always slower than pure float16 precision.
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
## Distilled model
|
## Distilled model
|
||||||
|
|
||||||
You could also use a distilled Stable Diffusion model and autoencoder to speed up inference. During distillation, many of the UNet's residual and attention blocks are shed to reduce the model size. The distilled model is faster and uses less memory while generating images of comparable quality to the full Stable Diffusion model.
|
You could also use a distilled Stable Diffusion model and autoencoder to speed up inference. During distillation, many of the UNet's residual and attention blocks are shed to reduce the model size by 51% and improve latency on CPU/GPU by 43%. The distilled model is faster and uses less memory while generating images of comparable quality to the full Stable Diffusion model.
|
||||||
|
|
||||||
Learn more about it in the [Distilled Stable Diffusion inference](../using-diffusers/distilled_sd) guide!
|
> [!TIP]
|
||||||
|
> Read the [Open-sourcing Knowledge Distillation Code and Weights of SD-Small and SD-Tiny](https://huggingface.co/blog/sd_distillation) blog post to learn more about how knowledge distillation training works to produce a faster, smaller, and cheaper generative model.
|
||||||
|
|
||||||
|
The inference times below are obtained from generating 4 images from the prompt "a photo of an astronaut riding a horse on mars" with 25 PNDM steps on a NVIDIA A100. Each generation is repeated 3 times with the distilled Stable Diffusion v1.4 model by [Nota AI](https://hf.co/nota-ai).
|
||||||
|
|
||||||
|
| setup | latency | speed-up |
|
||||||
|
|------------------------------|---------|----------|
|
||||||
|
| baseline | 6.37s | x1 |
|
||||||
|
| distilled | 4.18s | x1.52 |
|
||||||
|
| distilled + tiny autoencoder | 3.83s | x1.66 |
|
||||||
|
|
||||||
|
Let's load the distilled Stable Diffusion model and compare it against the original Stable Diffusion model.
|
||||||
|
|
||||||
|
```py
|
||||||
|
from diffusers import StableDiffusionPipeline
|
||||||
|
import torch
|
||||||
|
|
||||||
|
distilled = StableDiffusionPipeline.from_pretrained(
|
||||||
|
"nota-ai/bk-sdm-small", torch_dtype=torch.float16, use_safetensors=True,
|
||||||
|
).to("cuda")
|
||||||
|
prompt = "a golden vase with different flowers"
|
||||||
|
generator = torch.manual_seed(2023)
|
||||||
|
image = distilled("a golden vase with different flowers", num_inference_steps=25, generator=generator).images[0]
|
||||||
|
image
|
||||||
|
```
|
||||||
|
|
||||||
|
<div class="flex gap-4">
|
||||||
|
<div>
|
||||||
|
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/original_sd.png"/>
|
||||||
|
<figcaption class="mt-2 text-center text-sm text-gray-500">original Stable Diffusion</figcaption>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/distilled_sd.png"/>
|
||||||
|
<figcaption class="mt-2 text-center text-sm text-gray-500">distilled Stable Diffusion</figcaption>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
### Tiny AutoEncoder
|
||||||
|
|
||||||
|
To speed inference up even more, replace the autoencoder with a [distilled version](https://huggingface.co/sayakpaul/taesdxl-diffusers) of it.
|
||||||
|
|
||||||
|
```py
|
||||||
|
import torch
|
||||||
|
from diffusers import AutoencoderTiny, StableDiffusionPipeline
|
||||||
|
|
||||||
|
distilled = StableDiffusionPipeline.from_pretrained(
|
||||||
|
"nota-ai/bk-sdm-small", torch_dtype=torch.float16, use_safetensors=True,
|
||||||
|
).to("cuda")
|
||||||
|
distilled.vae = AutoencoderTiny.from_pretrained(
|
||||||
|
"sayakpaul/taesd-diffusers", torch_dtype=torch.float16, use_safetensors=True,
|
||||||
|
).to("cuda")
|
||||||
|
|
||||||
|
prompt = "a golden vase with different flowers"
|
||||||
|
generator = torch.manual_seed(2023)
|
||||||
|
image = distilled("a golden vase with different flowers", num_inference_steps=25, generator=generator).images[0]
|
||||||
|
image
|
||||||
|
```
|
||||||
|
|
||||||
|
<div class="flex justify-center">
|
||||||
|
<div>
|
||||||
|
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/distilled_sd_vae.png" />
|
||||||
|
<figcaption class="mt-2 text-center text-sm text-gray-500">distilled Stable Diffusion + Tiny AutoEncoder</figcaption>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|||||||
@@ -261,7 +261,7 @@ from dataclasses import dataclass
|
|||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class UNet2DConditionOutput:
|
class UNet2DConditionOutput:
|
||||||
sample: torch.FloatTensor
|
sample: torch.Tensor
|
||||||
|
|
||||||
|
|
||||||
pipe = StableDiffusionPipeline.from_pretrained(
|
pipe = StableDiffusionPipeline.from_pretrained(
|
||||||
|
|||||||
@@ -1,17 +0,0 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
|
||||||
the License. You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
|
||||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
|
||||||
specific language governing permissions and limitations under the License.
|
|
||||||
-->
|
|
||||||
|
|
||||||
# Overview
|
|
||||||
|
|
||||||
Generating high-quality outputs is computationally intensive, especially during each iterative step where you go from a noisy output to a less noisy output. One of 🤗 Diffuser's goals is to make this technology widely accessible to everyone, which includes enabling fast inference on consumer and specialized hardware.
|
|
||||||
|
|
||||||
This section will cover tips and tricks - like half-precision weights and sliced attention - for optimizing inference speed and reducing memory-consumption. You'll also learn how to speed up your PyTorch code with [`torch.compile`](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) or [ONNX Runtime](https://onnxruntime.ai/docs/), and enable memory-efficient attention with [xFormers](https://facebookresearch.github.io/xformers/). There are also guides for running inference on specific hardware like Apple Silicon, and Intel or Habana processors.
|
|
||||||
@@ -6,7 +6,7 @@ Before you begin, make sure you install T-GATE.
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install tgate
|
pip install tgate
|
||||||
pip install -U pytorch diffusers transformers accelerate DeepCache
|
pip install -U torch diffusers transformers accelerate DeepCache
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
@@ -46,7 +46,7 @@ pipe = TgatePixArtLoader(
|
|||||||
|
|
||||||
image = pipe.tgate(
|
image = pipe.tgate(
|
||||||
"An alpaca made of colorful building blocks, cyberpunk.",
|
"An alpaca made of colorful building blocks, cyberpunk.",
|
||||||
gate_step=gate_step,
|
gate_step=gate_step,
|
||||||
num_inference_steps=inference_step,
|
num_inference_steps=inference_step,
|
||||||
).images[0]
|
).images[0]
|
||||||
```
|
```
|
||||||
@@ -78,9 +78,9 @@ pipe = TgateSDXLLoader(
|
|||||||
).to("cuda")
|
).to("cuda")
|
||||||
|
|
||||||
image = pipe.tgate(
|
image = pipe.tgate(
|
||||||
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
|
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
|
||||||
gate_step=gate_step,
|
gate_step=gate_step,
|
||||||
num_inference_steps=inference_step
|
num_inference_steps=inference_step
|
||||||
).images[0]
|
).images[0]
|
||||||
```
|
```
|
||||||
</hfoption>
|
</hfoption>
|
||||||
@@ -111,9 +111,9 @@ pipe = TgateSDXLDeepCacheLoader(
|
|||||||
).to("cuda")
|
).to("cuda")
|
||||||
|
|
||||||
image = pipe.tgate(
|
image = pipe.tgate(
|
||||||
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
|
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
|
||||||
gate_step=gate_step,
|
gate_step=gate_step,
|
||||||
num_inference_steps=inference_step
|
num_inference_steps=inference_step
|
||||||
).images[0]
|
).images[0]
|
||||||
```
|
```
|
||||||
</hfoption>
|
</hfoption>
|
||||||
@@ -151,9 +151,9 @@ pipe = TgateSDXLLoader(
|
|||||||
).to("cuda")
|
).to("cuda")
|
||||||
|
|
||||||
image = pipe.tgate(
|
image = pipe.tgate(
|
||||||
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
|
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
|
||||||
gate_step=gate_step,
|
gate_step=gate_step,
|
||||||
num_inference_steps=inference_step
|
num_inference_steps=inference_step
|
||||||
).images[0]
|
).images[0]
|
||||||
```
|
```
|
||||||
</hfoption>
|
</hfoption>
|
||||||
|
|||||||
@@ -49,7 +49,7 @@ One of the simplest ways to speed up inference is to place the pipeline on a GPU
|
|||||||
pipeline = pipeline.to("cuda")
|
pipeline = pipeline.to("cuda")
|
||||||
```
|
```
|
||||||
|
|
||||||
To make sure you can use the same image and improve on it, use a [`Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed for [reproducibility](./using-diffusers/reproducibility):
|
To make sure you can use the same image and improve on it, use a [`Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed for [reproducibility](./using-diffusers/reusing_seeds):
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import torch
|
import torch
|
||||||
|
|||||||
@@ -349,7 +349,7 @@ control_image = load_image("./conditioning_image_1.png")
|
|||||||
prompt = "pale golden rod circle with old lace background"
|
prompt = "pale golden rod circle with old lace background"
|
||||||
|
|
||||||
generator = torch.manual_seed(0)
|
generator = torch.manual_seed(0)
|
||||||
image = pipe(prompt, num_inference_steps=20, generator=generator, image=control_image).images[0]
|
image = pipeline(prompt, num_inference_steps=20, generator=generator, image=control_image).images[0]
|
||||||
image.save("./output.png")
|
image.save("./output.png")
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
@@ -52,76 +52,6 @@ To learn more, take a look at the [Distributed Inference with 🤗 Accelerate](h
|
|||||||
|
|
||||||
</Tip>
|
</Tip>
|
||||||
|
|
||||||
### Device placement
|
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This feature is experimental and its APIs might change in the future.
|
|
||||||
|
|
||||||
With Accelerate, you can use the `device_map` to determine how to distribute the models of a pipeline across multiple devices. This is useful in situations where you have more than one GPU.
|
|
||||||
|
|
||||||
For example, if you have two 8GB GPUs, then using [`~DiffusionPipeline.enable_model_cpu_offload`] may not work so well because:
|
|
||||||
|
|
||||||
* it only works on a single GPU
|
|
||||||
* a single model might not fit on a single GPU ([`~DiffusionPipeline.enable_sequential_cpu_offload`] might work but it will be extremely slow and it is also limited to a single GPU)
|
|
||||||
|
|
||||||
To make use of both GPUs, you can use the "balanced" device placement strategy which splits the models across all available GPUs.
|
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> Only the "balanced" strategy is supported at the moment, and we plan to support additional mapping strategies in the future.
|
|
||||||
|
|
||||||
```diff
|
|
||||||
from diffusers import DiffusionPipeline
|
|
||||||
import torch
|
|
||||||
|
|
||||||
pipeline = DiffusionPipeline.from_pretrained(
|
|
||||||
- "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True,
|
|
||||||
+ "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, device_map="balanced"
|
|
||||||
)
|
|
||||||
image = pipeline("a dog").images[0]
|
|
||||||
image
|
|
||||||
```
|
|
||||||
|
|
||||||
You can also pass a dictionary to enforce the maximum GPU memory that can be used on each device:
|
|
||||||
|
|
||||||
```diff
|
|
||||||
from diffusers import DiffusionPipeline
|
|
||||||
import torch
|
|
||||||
|
|
||||||
max_memory = {0:"1GB", 1:"1GB"}
|
|
||||||
pipeline = DiffusionPipeline.from_pretrained(
|
|
||||||
"runwayml/stable-diffusion-v1-5",
|
|
||||||
torch_dtype=torch.float16,
|
|
||||||
use_safetensors=True,
|
|
||||||
device_map="balanced",
|
|
||||||
+ max_memory=max_memory
|
|
||||||
)
|
|
||||||
image = pipeline("a dog").images[0]
|
|
||||||
image
|
|
||||||
```
|
|
||||||
|
|
||||||
If a device is not present in `max_memory`, then it will be completely ignored and will not participate in the device placement.
|
|
||||||
|
|
||||||
By default, Diffusers uses the maximum memory of all devices. If the models don't fit on the GPUs, they are offloaded to the CPU. If the CPU doesn't have enough memory, then you might see an error. In that case, you could defer to using [`~DiffusionPipeline.enable_sequential_cpu_offload`] and [`~DiffusionPipeline.enable_model_cpu_offload`].
|
|
||||||
|
|
||||||
Call [`~DiffusionPipeline.reset_device_map`] to reset the `device_map` of a pipeline. This is also necessary if you want to use methods like `to()`, [`~DiffusionPipeline.enable_sequential_cpu_offload`], and [`~DiffusionPipeline.enable_model_cpu_offload`] on a pipeline that was device-mapped.
|
|
||||||
|
|
||||||
```py
|
|
||||||
pipeline.reset_device_map()
|
|
||||||
```
|
|
||||||
|
|
||||||
Once a pipeline has been device-mapped, you can also access its device map via `hf_device_map`:
|
|
||||||
|
|
||||||
```py
|
|
||||||
print(pipeline.hf_device_map)
|
|
||||||
```
|
|
||||||
|
|
||||||
An example device map would look like so:
|
|
||||||
|
|
||||||
|
|
||||||
```bash
|
|
||||||
{'unet': 1, 'vae': 1, 'safety_checker': 0, 'text_encoder': 0}
|
|
||||||
```
|
|
||||||
|
|
||||||
## PyTorch Distributed
|
## PyTorch Distributed
|
||||||
|
|
||||||
PyTorch supports [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) which enables data parallelism.
|
PyTorch supports [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) which enables data parallelism.
|
||||||
@@ -176,3 +106,6 @@ Once you've completed the inference script, use the `--nproc_per_node` argument
|
|||||||
```bash
|
```bash
|
||||||
torchrun --nproc_per_node=2 run_distributed.py
|
torchrun --nproc_per_node=2 run_distributed.py
|
||||||
```
|
```
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> You can use `device_map` within a [`DiffusionPipeline`] to distribute its model-level components on multiple devices. Refer to the [Device placement](../tutorials/inference_with_big_models#device-placement) guide to learn more.
|
||||||
@@ -440,6 +440,198 @@ Stable Diffusion XL (SDXL) is a powerful text-to-image model that generates high
|
|||||||
|
|
||||||
The SDXL training script is discussed in more detail in the [SDXL training](sdxl) guide.
|
The SDXL training script is discussed in more detail in the [SDXL training](sdxl) guide.
|
||||||
|
|
||||||
|
## DeepFloyd IF
|
||||||
|
|
||||||
|
DeepFloyd IF is a cascading pixel diffusion model with three stages. The first stage generates a base image and the second and third stages progressively upscale the base image into a high-resolution 1024x1024 image. Use the [train_dreambooth_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py) or [train_dreambooth.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) scripts to train a DeepFloyd IF model with LoRA or the full model.
|
||||||
|
|
||||||
|
DeepFloyd IF uses predicted variance, but the Diffusers training scripts use predicted error, so the trained DeepFloyd IF models are switched to a fixed variance schedule. The training scripts will update the scheduler config of the fully trained model for you. However, when you load the saved LoRA weights you must also update the pipeline's scheduler config.
|
||||||
|
|
||||||
|
```py
|
||||||
|
from diffusers import DiffusionPipeline
|
||||||
|
|
||||||
|
pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", use_safetensors=True)
|
||||||
|
|
||||||
|
pipe.load_lora_weights("<lora weights path>")
|
||||||
|
|
||||||
|
# Update scheduler config to fixed variance schedule
|
||||||
|
pipe.scheduler = pipe.scheduler.__class__.from_config(pipe.scheduler.config, variance_type="fixed_small")
|
||||||
|
```
|
||||||
|
|
||||||
|
The stage 2 model requires additional validation images to upscale. You can download and use a downsized version of the training images for this.
|
||||||
|
|
||||||
|
```py
|
||||||
|
from huggingface_hub import snapshot_download
|
||||||
|
|
||||||
|
local_dir = "./dog_downsized"
|
||||||
|
snapshot_download(
|
||||||
|
"diffusers/dog-example-downsized",
|
||||||
|
local_dir=local_dir,
|
||||||
|
repo_type="dataset",
|
||||||
|
ignore_patterns=".gitattributes",
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
The code samples below provide a brief overview of how to train a DeepFloyd IF model with a combination of DreamBooth and LoRA. Some important parameters to note are:
|
||||||
|
|
||||||
|
* `--resolution=64`, a much smaller resolution is required because DeepFloyd IF is a pixel diffusion model, and to work on uncompressed pixels the input images must be smaller
|
||||||
|
* `--pre_compute_text_embeddings`, compute the text embeddings ahead of time to save memory because the [`~transformers.T5Model`] can take up a lot of memory
|
||||||
|
* `--tokenizer_max_length=77`, you can use a longer default text length with T5 as the text encoder but the default model encoding procedure uses a shorter text length
|
||||||
|
* `--text_encoder_use_attention_mask`, to pass the attention mask to the text encoder
|
||||||
|
|
||||||
|
<hfoptions id="IF-DreamBooth">
|
||||||
|
<hfoption id="Stage 1 LoRA DreamBooth">
|
||||||
|
|
||||||
|
Training stage 1 of DeepFloyd IF with LoRA and DreamBooth requires ~28GB of memory.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0"
|
||||||
|
export INSTANCE_DIR="dog"
|
||||||
|
export OUTPUT_DIR="dreambooth_dog_lora"
|
||||||
|
|
||||||
|
accelerate launch train_dreambooth_lora.py \
|
||||||
|
--report_to wandb \
|
||||||
|
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||||
|
--instance_data_dir=$INSTANCE_DIR \
|
||||||
|
--output_dir=$OUTPUT_DIR \
|
||||||
|
--instance_prompt="a sks dog" \
|
||||||
|
--resolution=64 \
|
||||||
|
--train_batch_size=4 \
|
||||||
|
--gradient_accumulation_steps=1 \
|
||||||
|
--learning_rate=5e-6 \
|
||||||
|
--scale_lr \
|
||||||
|
--max_train_steps=1200 \
|
||||||
|
--validation_prompt="a sks dog" \
|
||||||
|
--validation_epochs=25 \
|
||||||
|
--checkpointing_steps=100 \
|
||||||
|
--pre_compute_text_embeddings \
|
||||||
|
--tokenizer_max_length=77 \
|
||||||
|
--text_encoder_use_attention_mask
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="Stage 2 LoRA DreamBooth">
|
||||||
|
|
||||||
|
For stage 2 of DeepFloyd IF with LoRA and DreamBooth, pay attention to these parameters:
|
||||||
|
|
||||||
|
* `--validation_images`, the images to upscale during validation
|
||||||
|
* `--class_labels_conditioning=timesteps`, to additionally condition the UNet as needed in stage 2
|
||||||
|
* `--learning_rate=1e-6`, a lower learning rate is used compared to stage 1
|
||||||
|
* `--resolution=256`, the expected resolution for the upscaler
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export MODEL_NAME="DeepFloyd/IF-II-L-v1.0"
|
||||||
|
export INSTANCE_DIR="dog"
|
||||||
|
export OUTPUT_DIR="dreambooth_dog_upscale"
|
||||||
|
export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png"
|
||||||
|
|
||||||
|
python train_dreambooth_lora.py \
|
||||||
|
--report_to wandb \
|
||||||
|
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||||
|
--instance_data_dir=$INSTANCE_DIR \
|
||||||
|
--output_dir=$OUTPUT_DIR \
|
||||||
|
--instance_prompt="a sks dog" \
|
||||||
|
--resolution=256 \
|
||||||
|
--train_batch_size=4 \
|
||||||
|
--gradient_accumulation_steps=1 \
|
||||||
|
--learning_rate=1e-6 \
|
||||||
|
--max_train_steps=2000 \
|
||||||
|
--validation_prompt="a sks dog" \
|
||||||
|
--validation_epochs=100 \
|
||||||
|
--checkpointing_steps=500 \
|
||||||
|
--pre_compute_text_embeddings \
|
||||||
|
--tokenizer_max_length=77 \
|
||||||
|
--text_encoder_use_attention_mask \
|
||||||
|
--validation_images $VALIDATION_IMAGES \
|
||||||
|
--class_labels_conditioning=timesteps
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="Stage 1 DreamBooth">
|
||||||
|
|
||||||
|
For stage 1 of DeepFloyd IF with DreamBooth, pay attention to these parameters:
|
||||||
|
|
||||||
|
* `--skip_save_text_encoder`, to skip saving the full T5 text encoder with the finetuned model
|
||||||
|
* `--use_8bit_adam`, to use 8-bit Adam optimizer to save memory due to the size of the optimizer state when training the full model
|
||||||
|
* `--learning_rate=1e-7`, a really low learning rate should be used for full model training otherwise the model quality is degraded (you can use a higher learning rate with a larger batch size)
|
||||||
|
|
||||||
|
Training with 8-bit Adam and a batch size of 4, the full model can be trained with ~48GB of memory.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0"
|
||||||
|
export INSTANCE_DIR="dog"
|
||||||
|
export OUTPUT_DIR="dreambooth_if"
|
||||||
|
|
||||||
|
accelerate launch train_dreambooth.py \
|
||||||
|
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||||
|
--instance_data_dir=$INSTANCE_DIR \
|
||||||
|
--output_dir=$OUTPUT_DIR \
|
||||||
|
--instance_prompt="a photo of sks dog" \
|
||||||
|
--resolution=64 \
|
||||||
|
--train_batch_size=4 \
|
||||||
|
--gradient_accumulation_steps=1 \
|
||||||
|
--learning_rate=1e-7 \
|
||||||
|
--max_train_steps=150 \
|
||||||
|
--validation_prompt "a photo of sks dog" \
|
||||||
|
--validation_steps 25 \
|
||||||
|
--text_encoder_use_attention_mask \
|
||||||
|
--tokenizer_max_length 77 \
|
||||||
|
--pre_compute_text_embeddings \
|
||||||
|
--use_8bit_adam \
|
||||||
|
--set_grads_to_none \
|
||||||
|
--skip_save_text_encoder \
|
||||||
|
--push_to_hub
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="Stage 2 DreamBooth">
|
||||||
|
|
||||||
|
For stage 2 of DeepFloyd IF with DreamBooth, pay attention to these parameters:
|
||||||
|
|
||||||
|
* `--learning_rate=5e-6`, use a lower learning rate with a smaller effective batch size
|
||||||
|
* `--resolution=256`, the expected resolution for the upscaler
|
||||||
|
* `--train_batch_size=2` and `--gradient_accumulation_steps=6`, training effectively on images with faces requires larger batch sizes
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export MODEL_NAME="DeepFloyd/IF-II-L-v1.0"
|
||||||
|
export INSTANCE_DIR="dog"
|
||||||
|
export OUTPUT_DIR="dreambooth_dog_upscale"
|
||||||
|
export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png"
|
||||||
|
|
||||||
|
accelerate launch train_dreambooth.py \
|
||||||
|
--report_to wandb \
|
||||||
|
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||||
|
--instance_data_dir=$INSTANCE_DIR \
|
||||||
|
--output_dir=$OUTPUT_DIR \
|
||||||
|
--instance_prompt="a sks dog" \
|
||||||
|
--resolution=256 \
|
||||||
|
--train_batch_size=2 \
|
||||||
|
--gradient_accumulation_steps=6 \
|
||||||
|
--learning_rate=5e-6 \
|
||||||
|
--max_train_steps=2000 \
|
||||||
|
--validation_prompt="a sks dog" \
|
||||||
|
--validation_steps=150 \
|
||||||
|
--checkpointing_steps=500 \
|
||||||
|
--pre_compute_text_embeddings \
|
||||||
|
--tokenizer_max_length=77 \
|
||||||
|
--text_encoder_use_attention_mask \
|
||||||
|
--validation_images $VALIDATION_IMAGES \
|
||||||
|
--class_labels_conditioning timesteps \
|
||||||
|
--push_to_hub
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
|
### Training tips
|
||||||
|
|
||||||
|
Training the DeepFloyd IF model can be challenging, but here are some tips that we've found helpful:
|
||||||
|
|
||||||
|
- LoRA is sufficient for training the stage 1 model because the model's low resolution makes representing finer details difficult regardless.
|
||||||
|
- For common or simple objects, you don't necessarily need to finetune the upscaler. Make sure the prompt passed to the upscaler is adjusted to remove the new token from the instance prompt. For example, if your stage 1 prompt is "a sks dog" then your stage 2 prompt should be "a dog".
|
||||||
|
- For finer details like faces, fully training the stage 2 upscaler is better than training the stage 2 model with LoRA. It also helps to use lower learning rates with larger batch sizes.
|
||||||
|
- Lower learning rates should be used to train the stage 2 model.
|
||||||
|
- The [`DDPMScheduler`] works better than the DPMSolver used in the training scripts.
|
||||||
|
|
||||||
## Next steps
|
## Next steps
|
||||||
|
|
||||||
Congratulations on training your DreamBooth model! To learn more about how to use your new model, the following guide may be helpful:
|
Congratulations on training your DreamBooth model! To learn more about how to use your new model, the following guide may be helpful:
|
||||||
|
|||||||
@@ -205,7 +205,7 @@ model_pred = unet(noisy_latents, timesteps, None, added_cond_kwargs=added_cond_k
|
|||||||
|
|
||||||
Once you’ve made all your changes or you’re okay with the default configuration, you’re ready to launch the training script! 🚀
|
Once you’ve made all your changes or you’re okay with the default configuration, you’re ready to launch the training script! 🚀
|
||||||
|
|
||||||
You'll train on the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset to generate your own Pokémon, but you can also create and train on your own dataset by following the [Create a dataset for training](create_dataset) guide. Set the environment variable `DATASET_NAME` to the name of the dataset on the Hub or if you're training on your own files, set the environment variable `TRAIN_DIR` to a path to your dataset.
|
You'll train on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate your own Naruto characters, but you can also create and train on your own dataset by following the [Create a dataset for training](create_dataset) guide. Set the environment variable `DATASET_NAME` to the name of the dataset on the Hub or if you're training on your own files, set the environment variable `TRAIN_DIR` to a path to your dataset.
|
||||||
|
|
||||||
If you’re training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command.
|
If you’re training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command.
|
||||||
|
|
||||||
@@ -219,7 +219,7 @@ To monitor training progress with Weights & Biases, add the `--report_to=wandb`
|
|||||||
<hfoption id="prior model">
|
<hfoption id="prior model">
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
export DATASET_NAME="lambdalabs/pokemon-blip-captions"
|
export DATASET_NAME="lambdalabs/naruto-blip-captions"
|
||||||
|
|
||||||
accelerate launch --mixed_precision="fp16" train_text_to_image_prior.py \
|
accelerate launch --mixed_precision="fp16" train_text_to_image_prior.py \
|
||||||
--dataset_name=$DATASET_NAME \
|
--dataset_name=$DATASET_NAME \
|
||||||
@@ -232,17 +232,17 @@ accelerate launch --mixed_precision="fp16" train_text_to_image_prior.py \
|
|||||||
--checkpoints_total_limit=3 \
|
--checkpoints_total_limit=3 \
|
||||||
--lr_scheduler="constant" \
|
--lr_scheduler="constant" \
|
||||||
--lr_warmup_steps=0 \
|
--lr_warmup_steps=0 \
|
||||||
--validation_prompts="A robot pokemon, 4k photo" \
|
--validation_prompts="A robot naruto, 4k photo" \
|
||||||
--report_to="wandb" \
|
--report_to="wandb" \
|
||||||
--push_to_hub \
|
--push_to_hub \
|
||||||
--output_dir="kandi2-prior-pokemon-model"
|
--output_dir="kandi2-prior-naruto-model"
|
||||||
```
|
```
|
||||||
|
|
||||||
</hfoption>
|
</hfoption>
|
||||||
<hfoption id="decoder model">
|
<hfoption id="decoder model">
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
export DATASET_NAME="lambdalabs/pokemon-blip-captions"
|
export DATASET_NAME="lambdalabs/naruto-blip-captions"
|
||||||
|
|
||||||
accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \
|
accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \
|
||||||
--dataset_name=$DATASET_NAME \
|
--dataset_name=$DATASET_NAME \
|
||||||
@@ -256,10 +256,10 @@ accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \
|
|||||||
--checkpoints_total_limit=3 \
|
--checkpoints_total_limit=3 \
|
||||||
--lr_scheduler="constant" \
|
--lr_scheduler="constant" \
|
||||||
--lr_warmup_steps=0 \
|
--lr_warmup_steps=0 \
|
||||||
--validation_prompts="A robot pokemon, 4k photo" \
|
--validation_prompts="A robot naruto, 4k photo" \
|
||||||
--report_to="wandb" \
|
--report_to="wandb" \
|
||||||
--push_to_hub \
|
--push_to_hub \
|
||||||
--output_dir="kandi2-decoder-pokemon-model"
|
--output_dir="kandi2-decoder-naruto-model"
|
||||||
```
|
```
|
||||||
|
|
||||||
</hfoption>
|
</hfoption>
|
||||||
@@ -279,7 +279,7 @@ prior_components = {"prior_" + k: v for k,v in prior_pipeline.components.items()
|
|||||||
pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", **prior_components, torch_dtype=torch.float16)
|
pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", **prior_components, torch_dtype=torch.float16)
|
||||||
|
|
||||||
pipeline.enable_model_cpu_offload()
|
pipeline.enable_model_cpu_offload()
|
||||||
prompt="A robot pokemon, 4k photo"
|
prompt="A robot naruto, 4k photo"
|
||||||
image = pipeline(prompt=prompt, negative_prompt=negative_prompt).images[0]
|
image = pipeline(prompt=prompt, negative_prompt=negative_prompt).images[0]
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -299,7 +299,7 @@ import torch
|
|||||||
pipeline = AutoPipelineForText2Image.from_pretrained("path/to/saved/model", torch_dtype=torch.float16)
|
pipeline = AutoPipelineForText2Image.from_pretrained("path/to/saved/model", torch_dtype=torch.float16)
|
||||||
pipeline.enable_model_cpu_offload()
|
pipeline.enable_model_cpu_offload()
|
||||||
|
|
||||||
prompt="A robot pokemon, 4k photo"
|
prompt="A robot naruto, 4k photo"
|
||||||
image = pipeline(prompt=prompt).images[0]
|
image = pipeline(prompt=prompt).images[0]
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -313,7 +313,7 @@ unet = UNet2DConditionModel.from_pretrained("path/to/saved/model" + "/checkpoint
|
|||||||
pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", unet=unet, torch_dtype=torch.float16)
|
pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", unet=unet, torch_dtype=torch.float16)
|
||||||
pipeline.enable_model_cpu_offload()
|
pipeline.enable_model_cpu_offload()
|
||||||
|
|
||||||
image = pipeline(prompt="A robot pokemon, 4k photo").images[0]
|
image = pipeline(prompt="A robot naruto, 4k photo").images[0]
|
||||||
```
|
```
|
||||||
|
|
||||||
</hfoption>
|
</hfoption>
|
||||||
|
|||||||
@@ -170,7 +170,7 @@ Aside from setting up the LoRA layers, the training script is more or less the s
|
|||||||
|
|
||||||
Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀
|
Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀
|
||||||
|
|
||||||
Let's train on the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset to generate our own Pokémon. Set the environment variables `MODEL_NAME` and `DATASET_NAME` to the model and dataset respectively. You should also specify where to save the model in `OUTPUT_DIR`, and the name of the model to save to on the Hub with `HUB_MODEL_ID`. The script creates and saves the following files to your repository:
|
Let's train on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate your own Naruto characters. Set the environment variables `MODEL_NAME` and `DATASET_NAME` to the model and dataset respectively. You should also specify where to save the model in `OUTPUT_DIR`, and the name of the model to save to on the Hub with `HUB_MODEL_ID`. The script creates and saves the following files to your repository:
|
||||||
|
|
||||||
- saved model checkpoints
|
- saved model checkpoints
|
||||||
- `pytorch_lora_weights.safetensors` (the trained LoRA weights)
|
- `pytorch_lora_weights.safetensors` (the trained LoRA weights)
|
||||||
@@ -185,9 +185,9 @@ A full training run takes ~5 hours on a 2080 Ti GPU with 11GB of VRAM.
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
|
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
|
||||||
export OUTPUT_DIR="/sddata/finetune/lora/pokemon"
|
export OUTPUT_DIR="/sddata/finetune/lora/naruto"
|
||||||
export HUB_MODEL_ID="pokemon-lora"
|
export HUB_MODEL_ID="naruto-lora"
|
||||||
export DATASET_NAME="lambdalabs/pokemon-blip-captions"
|
export DATASET_NAME="lambdalabs/naruto-blip-captions"
|
||||||
|
|
||||||
accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \
|
accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \
|
||||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||||
@@ -208,7 +208,7 @@ accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \
|
|||||||
--hub_model_id=${HUB_MODEL_ID} \
|
--hub_model_id=${HUB_MODEL_ID} \
|
||||||
--report_to=wandb \
|
--report_to=wandb \
|
||||||
--checkpointing_steps=500 \
|
--checkpointing_steps=500 \
|
||||||
--validation_prompt="A pokemon with blue eyes." \
|
--validation_prompt="A naruto with blue eyes." \
|
||||||
--seed=1337
|
--seed=1337
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -220,7 +220,7 @@ import torch
|
|||||||
|
|
||||||
pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
|
pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
|
||||||
pipeline.load_lora_weights("path/to/lora/model", weight_name="pytorch_lora_weights.safetensors")
|
pipeline.load_lora_weights("path/to/lora/model", weight_name="pytorch_lora_weights.safetensors")
|
||||||
image = pipeline("A pokemon with blue eyes").images[0]
|
image = pipeline("A naruto with blue eyes").images[0]
|
||||||
```
|
```
|
||||||
|
|
||||||
## Next steps
|
## Next steps
|
||||||
|
|||||||
@@ -176,7 +176,7 @@ If you want to learn more about how the training loop works, check out the [Unde
|
|||||||
|
|
||||||
Once you’ve made all your changes or you’re okay with the default configuration, you’re ready to launch the training script! 🚀
|
Once you’ve made all your changes or you’re okay with the default configuration, you’re ready to launch the training script! 🚀
|
||||||
|
|
||||||
Let’s train on the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset to generate your own Pokémon. Set the environment variables `MODEL_NAME` and `DATASET_NAME` to the model and the dataset (either from the Hub or a local path). You should also specify a VAE other than the SDXL VAE (either from the Hub or a local path) with `VAE_NAME` to avoid numerical instabilities.
|
Let’s train on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate your own Naruto characters. Set the environment variables `MODEL_NAME` and `DATASET_NAME` to the model and the dataset (either from the Hub or a local path). You should also specify a VAE other than the SDXL VAE (either from the Hub or a local path) with `VAE_NAME` to avoid numerical instabilities.
|
||||||
|
|
||||||
<Tip>
|
<Tip>
|
||||||
|
|
||||||
@@ -187,7 +187,7 @@ To monitor training progress with Weights & Biases, add the `--report_to=wandb`
|
|||||||
```bash
|
```bash
|
||||||
export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
|
export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
|
||||||
export VAE_NAME="madebyollin/sdxl-vae-fp16-fix"
|
export VAE_NAME="madebyollin/sdxl-vae-fp16-fix"
|
||||||
export DATASET_NAME="lambdalabs/pokemon-blip-captions"
|
export DATASET_NAME="lambdalabs/naruto-blip-captions"
|
||||||
|
|
||||||
accelerate launch train_text_to_image_sdxl.py \
|
accelerate launch train_text_to_image_sdxl.py \
|
||||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||||
@@ -211,7 +211,7 @@ accelerate launch train_text_to_image_sdxl.py \
|
|||||||
--validation_prompt="a cute Sundar Pichai creature" \
|
--validation_prompt="a cute Sundar Pichai creature" \
|
||||||
--validation_epochs 5 \
|
--validation_epochs 5 \
|
||||||
--checkpointing_steps=5000 \
|
--checkpointing_steps=5000 \
|
||||||
--output_dir="sdxl-pokemon-model" \
|
--output_dir="sdxl-naruto-model" \
|
||||||
--push_to_hub
|
--push_to_hub
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -226,9 +226,9 @@ import torch
|
|||||||
|
|
||||||
pipeline = DiffusionPipeline.from_pretrained("path/to/your/model", torch_dtype=torch.float16).to("cuda")
|
pipeline = DiffusionPipeline.from_pretrained("path/to/your/model", torch_dtype=torch.float16).to("cuda")
|
||||||
|
|
||||||
prompt = "A pokemon with green eyes and red legs."
|
prompt = "A naruto with green eyes and red legs."
|
||||||
image = pipeline(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
|
image = pipeline(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
|
||||||
image.save("pokemon.png")
|
image.save("naruto.png")
|
||||||
```
|
```
|
||||||
|
|
||||||
</hfoption>
|
</hfoption>
|
||||||
@@ -244,11 +244,11 @@ import torch_xla.core.xla_model as xm
|
|||||||
device = xm.xla_device()
|
device = xm.xla_device()
|
||||||
pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0").to(device)
|
pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0").to(device)
|
||||||
|
|
||||||
prompt = "A pokemon with green eyes and red legs."
|
prompt = "A naruto with green eyes and red legs."
|
||||||
start = time()
|
start = time()
|
||||||
image = pipeline(prompt, num_inference_steps=inference_steps).images[0]
|
image = pipeline(prompt, num_inference_steps=inference_steps).images[0]
|
||||||
print(f'Compilation time is {time()-start} sec')
|
print(f'Compilation time is {time()-start} sec')
|
||||||
image.save("pokemon.png")
|
image.save("naruto.png")
|
||||||
|
|
||||||
start = time()
|
start = time()
|
||||||
image = pipeline(prompt, num_inference_steps=inference_steps).images[0]
|
image = pipeline(prompt, num_inference_steps=inference_steps).images[0]
|
||||||
|
|||||||
@@ -158,7 +158,7 @@ Once you've made all your changes or you're okay with the default configuration,
|
|||||||
<hfoptions id="training-inference">
|
<hfoptions id="training-inference">
|
||||||
<hfoption id="PyTorch">
|
<hfoption id="PyTorch">
|
||||||
|
|
||||||
Let's train on the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset to generate your own Pokémon. Set the environment variables `MODEL_NAME` and `dataset_name` to the model and the dataset (either from the Hub or a local path). If you're training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command.
|
Let's train on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate your own Naruto characters. Set the environment variables `MODEL_NAME` and `dataset_name` to the model and the dataset (either from the Hub or a local path). If you're training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command.
|
||||||
|
|
||||||
<Tip>
|
<Tip>
|
||||||
|
|
||||||
@@ -168,7 +168,7 @@ To train on a local dataset, set the `TRAIN_DIR` and `OUTPUT_DIR` environment va
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
|
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
|
||||||
export dataset_name="lambdalabs/pokemon-blip-captions"
|
export dataset_name="lambdalabs/naruto-blip-captions"
|
||||||
|
|
||||||
accelerate launch --mixed_precision="fp16" train_text_to_image.py \
|
accelerate launch --mixed_precision="fp16" train_text_to_image.py \
|
||||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||||
@@ -181,9 +181,9 @@ accelerate launch --mixed_precision="fp16" train_text_to_image.py \
|
|||||||
--max_train_steps=15000 \
|
--max_train_steps=15000 \
|
||||||
--learning_rate=1e-05 \
|
--learning_rate=1e-05 \
|
||||||
--max_grad_norm=1 \
|
--max_grad_norm=1 \
|
||||||
--enable_xformers_memory_efficient_attention
|
--enable_xformers_memory_efficient_attention \
|
||||||
--lr_scheduler="constant" --lr_warmup_steps=0 \
|
--lr_scheduler="constant" --lr_warmup_steps=0 \
|
||||||
--output_dir="sd-pokemon-model" \
|
--output_dir="sd-naruto-model" \
|
||||||
--push_to_hub
|
--push_to_hub
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -202,7 +202,7 @@ To train on a local dataset, set the `TRAIN_DIR` and `OUTPUT_DIR` environment va
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
|
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
|
||||||
export dataset_name="lambdalabs/pokemon-blip-captions"
|
export dataset_name="lambdalabs/naruto-blip-captions"
|
||||||
|
|
||||||
python train_text_to_image_flax.py \
|
python train_text_to_image_flax.py \
|
||||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||||
@@ -212,7 +212,7 @@ python train_text_to_image_flax.py \
|
|||||||
--max_train_steps=15000 \
|
--max_train_steps=15000 \
|
||||||
--learning_rate=1e-05 \
|
--learning_rate=1e-05 \
|
||||||
--max_grad_norm=1 \
|
--max_grad_norm=1 \
|
||||||
--output_dir="sd-pokemon-model" \
|
--output_dir="sd-naruto-model" \
|
||||||
--push_to_hub
|
--push_to_hub
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -231,7 +231,7 @@ import torch
|
|||||||
pipeline = StableDiffusionPipeline.from_pretrained("path/to/saved_model", torch_dtype=torch.float16, use_safetensors=True).to("cuda")
|
pipeline = StableDiffusionPipeline.from_pretrained("path/to/saved_model", torch_dtype=torch.float16, use_safetensors=True).to("cuda")
|
||||||
|
|
||||||
image = pipeline(prompt="yoda").images[0]
|
image = pipeline(prompt="yoda").images[0]
|
||||||
image.save("yoda-pokemon.png")
|
image.save("yoda-naruto.png")
|
||||||
```
|
```
|
||||||
|
|
||||||
</hfoption>
|
</hfoption>
|
||||||
@@ -246,7 +246,7 @@ from diffusers import FlaxStableDiffusionPipeline
|
|||||||
|
|
||||||
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path/to/saved_model", dtype=jax.numpy.bfloat16)
|
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path/to/saved_model", dtype=jax.numpy.bfloat16)
|
||||||
|
|
||||||
prompt = "yoda pokemon"
|
prompt = "yoda naruto"
|
||||||
prng_seed = jax.random.PRNGKey(0)
|
prng_seed = jax.random.PRNGKey(0)
|
||||||
num_inference_steps = 50
|
num_inference_steps = 50
|
||||||
|
|
||||||
@@ -261,7 +261,7 @@ prompt_ids = shard(prompt_ids)
|
|||||||
|
|
||||||
images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
|
images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
|
||||||
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
|
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
|
||||||
image.save("yoda-pokemon.png")
|
image.save("yoda-naruto.png")
|
||||||
```
|
```
|
||||||
|
|
||||||
</hfoption>
|
</hfoption>
|
||||||
|
|||||||
@@ -131,7 +131,7 @@ If you want to learn more about how the training loop works, check out the [Unde
|
|||||||
|
|
||||||
Once you’ve made all your changes or you’re okay with the default configuration, you’re ready to launch the training script! 🚀
|
Once you’ve made all your changes or you’re okay with the default configuration, you’re ready to launch the training script! 🚀
|
||||||
|
|
||||||
Set the `DATASET_NAME` environment variable to the dataset name from the Hub. This guide uses the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset, but you can create and train on your own datasets as well (see the [Create a dataset for training](create_dataset) guide).
|
Set the `DATASET_NAME` environment variable to the dataset name from the Hub. This guide uses the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset, but you can create and train on your own datasets as well (see the [Create a dataset for training](create_dataset) guide).
|
||||||
|
|
||||||
<Tip>
|
<Tip>
|
||||||
|
|
||||||
@@ -140,7 +140,7 @@ To monitor training progress with Weights & Biases, add the `--report_to=wandb`
|
|||||||
</Tip>
|
</Tip>
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
export DATASET_NAME="lambdalabs/pokemon-blip-captions"
|
export DATASET_NAME="lambdalabs/naruto-blip-captions"
|
||||||
|
|
||||||
accelerate launch train_text_to_image_prior.py \
|
accelerate launch train_text_to_image_prior.py \
|
||||||
--mixed_precision="fp16" \
|
--mixed_precision="fp16" \
|
||||||
@@ -156,10 +156,10 @@ accelerate launch train_text_to_image_prior.py \
|
|||||||
--checkpoints_total_limit=3 \
|
--checkpoints_total_limit=3 \
|
||||||
--lr_scheduler="constant" \
|
--lr_scheduler="constant" \
|
||||||
--lr_warmup_steps=0 \
|
--lr_warmup_steps=0 \
|
||||||
--validation_prompts="A robot pokemon, 4k photo" \
|
--validation_prompts="A robot naruto, 4k photo" \
|
||||||
--report_to="wandb" \
|
--report_to="wandb" \
|
||||||
--push_to_hub \
|
--push_to_hub \
|
||||||
--output_dir="wuerstchen-prior-pokemon-model"
|
--output_dir="wuerstchen-prior-naruto-model"
|
||||||
```
|
```
|
||||||
|
|
||||||
Once training is complete, you can use your newly trained model for inference!
|
Once training is complete, you can use your newly trained model for inference!
|
||||||
@@ -171,7 +171,7 @@ from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS
|
|||||||
|
|
||||||
pipeline = AutoPipelineForText2Image.from_pretrained("path/to/saved/model", torch_dtype=torch.float16).to("cuda")
|
pipeline = AutoPipelineForText2Image.from_pretrained("path/to/saved/model", torch_dtype=torch.float16).to("cuda")
|
||||||
|
|
||||||
caption = "A cute bird pokemon holding a shield"
|
caption = "A cute bird naruto holding a shield"
|
||||||
images = pipeline(
|
images = pipeline(
|
||||||
caption,
|
caption,
|
||||||
width=1024,
|
width=1024,
|
||||||
|
|||||||
@@ -12,75 +12,74 @@ specific language governing permissions and limitations under the License.
|
|||||||
|
|
||||||
# AutoPipeline
|
# AutoPipeline
|
||||||
|
|
||||||
🤗 Diffusers is able to complete many different tasks, and you can often reuse the same pretrained weights for multiple tasks such as text-to-image, image-to-image, and inpainting. If you're new to the library and diffusion models though, it may be difficult to know which pipeline to use for a task. For example, if you're using the [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) checkpoint for text-to-image, you might not know that you could also use it for image-to-image and inpainting by loading the checkpoint with the [`StableDiffusionImg2ImgPipeline`] and [`StableDiffusionInpaintPipeline`] classes respectively.
|
Diffusers provides many pipelines for basic tasks like generating images, videos, audio, and inpainting. On top of these, there are specialized pipelines for adapters and features like upscaling, super-resolution, and more. Different pipeline classes can even use the same checkpoint because they share the same pretrained model! With so many different pipelines, it can be overwhelming to know which pipeline class to use.
|
||||||
|
|
||||||
The `AutoPipeline` class is designed to simplify the variety of pipelines in 🤗 Diffusers. It is a generic, *task-first* pipeline that lets you focus on the task. The `AutoPipeline` automatically detects the correct pipeline class to use, which makes it easier to load a checkpoint for a task without knowing the specific pipeline class name.
|
The [AutoPipeline](../api/pipelines/auto_pipeline) class is designed to simplify the variety of pipelines in Diffusers. It is a generic *task-first* pipeline that lets you focus on a task ([`AutoPipelineForText2Image`], [`AutoPipelineForImage2Image`], and [`AutoPipelineForInpainting`]) without needing to know the specific pipeline class. The [AutoPipeline](../api/pipelines/auto_pipeline) automatically detects the correct pipeline class to use.
|
||||||
|
|
||||||
<Tip>
|
For example, let's use the [dreamlike-art/dreamlike-photoreal-2.0](https://hf.co/dreamlike-art/dreamlike-photoreal-2.0) checkpoint.
|
||||||
|
|
||||||
Take a look at the [AutoPipeline](../api/pipelines/auto_pipeline) reference to see which tasks are supported. Currently, it supports text-to-image, image-to-image, and inpainting.
|
Under the hood, [AutoPipeline](../api/pipelines/auto_pipeline):
|
||||||
|
|
||||||
</Tip>
|
1. Detects a `"stable-diffusion"` class from the [model_index.json](https://hf.co/dreamlike-art/dreamlike-photoreal-2.0/blob/main/model_index.json) file.
|
||||||
|
2. Depending on the task you're interested in, it loads the [`StableDiffusionPipeline`], [`StableDiffusionImg2ImgPipeline`], or [`StableDiffusionInpaintPipeline`]. Any parameter (`strength`, `num_inference_steps`, etc.) you would pass to these specific pipelines can also be passed to the [AutoPipeline](../api/pipelines/auto_pipeline).
|
||||||
|
|
||||||
This tutorial shows you how to use an `AutoPipeline` to automatically infer the pipeline class to load for a specific task, given the pretrained weights.
|
<hfoptions id="autopipeline">
|
||||||
|
<hfoption id="text-to-image">
|
||||||
## Choose an AutoPipeline for your task
|
|
||||||
|
|
||||||
Start by picking a checkpoint. For example, if you're interested in text-to-image with the [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) checkpoint, use [`AutoPipelineForText2Image`]:
|
|
||||||
|
|
||||||
```py
|
```py
|
||||||
from diffusers import AutoPipelineForText2Image
|
from diffusers import AutoPipelineForText2Image
|
||||||
import torch
|
import torch
|
||||||
|
|
||||||
pipeline = AutoPipelineForText2Image.from_pretrained(
|
pipe_txt2img = AutoPipelineForText2Image.from_pretrained(
|
||||||
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
|
"dreamlike-art/dreamlike-photoreal-2.0", torch_dtype=torch.float16, use_safetensors=True
|
||||||
).to("cuda")
|
).to("cuda")
|
||||||
prompt = "peasant and dragon combat, wood cutting style, viking era, bevel with rune"
|
|
||||||
|
|
||||||
image = pipeline(prompt, num_inference_steps=25).images[0]
|
prompt = "cinematic photo of Godzilla eating sushi with a cat in a izakaya, 35mm photograph, film, professional, 4k, highly detailed"
|
||||||
|
generator = torch.Generator(device="cpu").manual_seed(37)
|
||||||
|
image = pipe_txt2img(prompt, generator=generator).images[0]
|
||||||
image
|
image
|
||||||
```
|
```
|
||||||
|
|
||||||
<div class="flex justify-center">
|
<div class="flex justify-center">
|
||||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-text2img.png" alt="generated image of peasant fighting dragon in wood cutting style"/>
|
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-text2img.png"/>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
Under the hood, [`AutoPipelineForText2Image`]:
|
</hfoption>
|
||||||
|
<hfoption id="image-to-image">
|
||||||
1. automatically detects a `"stable-diffusion"` class from the [`model_index.json`](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/model_index.json) file
|
|
||||||
2. loads the corresponding text-to-image [`StableDiffusionPipeline`] based on the `"stable-diffusion"` class name
|
|
||||||
|
|
||||||
Likewise, for image-to-image, [`AutoPipelineForImage2Image`] detects a `"stable-diffusion"` checkpoint from the `model_index.json` file and it'll load the corresponding [`StableDiffusionImg2ImgPipeline`] behind the scenes. You can also pass any additional arguments specific to the pipeline class such as `strength`, which determines the amount of noise or variation added to an input image:
|
|
||||||
|
|
||||||
```py
|
```py
|
||||||
from diffusers import AutoPipelineForImage2Image
|
from diffusers import AutoPipelineForImage2Image
|
||||||
|
from diffusers.utils import load_image
|
||||||
import torch
|
import torch
|
||||||
import requests
|
|
||||||
from PIL import Image
|
|
||||||
from io import BytesIO
|
|
||||||
|
|
||||||
pipeline = AutoPipelineForImage2Image.from_pretrained(
|
pipe_img2img = AutoPipelineForImage2Image.from_pretrained(
|
||||||
"runwayml/stable-diffusion-v1-5",
|
"dreamlike-art/dreamlike-photoreal-2.0", torch_dtype=torch.float16, use_safetensors=True
|
||||||
torch_dtype=torch.float16,
|
|
||||||
use_safetensors=True,
|
|
||||||
).to("cuda")
|
).to("cuda")
|
||||||
prompt = "a portrait of a dog wearing a pearl earring"
|
|
||||||
|
|
||||||
url = "https://upload.wikimedia.org/wikipedia/commons/thumb/0/0f/1665_Girl_with_a_Pearl_Earring.jpg/800px-1665_Girl_with_a_Pearl_Earring.jpg"
|
init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-text2img.png")
|
||||||
|
|
||||||
response = requests.get(url)
|
prompt = "cinematic photo of Godzilla eating burgers with a cat in a fast food restaurant, 35mm photograph, film, professional, 4k, highly detailed"
|
||||||
image = Image.open(BytesIO(response.content)).convert("RGB")
|
generator = torch.Generator(device="cpu").manual_seed(53)
|
||||||
image.thumbnail((768, 768))
|
image = pipe_img2img(prompt, image=init_image, generator=generator).images[0]
|
||||||
|
|
||||||
image = pipeline(prompt, image, num_inference_steps=200, strength=0.75, guidance_scale=10.5).images[0]
|
|
||||||
image
|
image
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Notice how the [dreamlike-art/dreamlike-photoreal-2.0](https://hf.co/dreamlike-art/dreamlike-photoreal-2.0) checkpoint is used for both text-to-image and image-to-image tasks? To save memory and avoid loading the checkpoint twice, use the [`~DiffusionPipeline.from_pipe`] method.
|
||||||
|
|
||||||
|
```py
|
||||||
|
pipe_img2img = AutoPipelineForImage2Image.from_pipe(pipe_txt2img).to("cuda")
|
||||||
|
image = pipe_img2img(prompt, image=init_image, generator=generator).images[0]
|
||||||
|
image
|
||||||
|
```
|
||||||
|
|
||||||
|
You can learn more about the [`~DiffusionPipeline.from_pipe`] method in the [Reuse a pipeline](../using-diffusers/loading#reuse-a-pipeline) guide.
|
||||||
|
|
||||||
<div class="flex justify-center">
|
<div class="flex justify-center">
|
||||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-img2img.png" alt="generated image of a vermeer portrait of a dog wearing a pearl earring"/>
|
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-img2img.png"/>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
And if you want to do inpainting, then [`AutoPipelineForInpainting`] loads the underlying [`StableDiffusionInpaintPipeline`] class in the same way:
|
</hfoption>
|
||||||
|
<hfoption id="inpainting">
|
||||||
|
|
||||||
```py
|
```py
|
||||||
from diffusers import AutoPipelineForInpainting
|
from diffusers import AutoPipelineForInpainting
|
||||||
@@ -91,22 +90,27 @@ pipeline = AutoPipelineForInpainting.from_pretrained(
|
|||||||
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True
|
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True
|
||||||
).to("cuda")
|
).to("cuda")
|
||||||
|
|
||||||
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
|
init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-img2img.png")
|
||||||
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
|
mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-mask.png")
|
||||||
|
|
||||||
init_image = load_image(img_url).convert("RGB")
|
prompt = "cinematic photo of a owl, 35mm photograph, film, professional, 4k, highly detailed"
|
||||||
mask_image = load_image(mask_url).convert("RGB")
|
generator = torch.Generator(device="cpu").manual_seed(38)
|
||||||
|
image = pipeline(prompt, image=init_image, mask_image=mask_image, generator=generator, strength=0.4).images[0]
|
||||||
prompt = "A majestic tiger sitting on a bench"
|
|
||||||
image = pipeline(prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80).images[0]
|
|
||||||
image
|
image
|
||||||
```
|
```
|
||||||
|
|
||||||
<div class="flex justify-center">
|
<div class="flex justify-center">
|
||||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-inpaint.png" alt="generated image of a tiger sitting on a bench"/>
|
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-inpaint.png"/>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
If you try to load an unsupported checkpoint, it'll throw an error:
|
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
|
## Unsupported checkpoints
|
||||||
|
|
||||||
|
The [AutoPipeline](../api/pipelines/auto_pipeline) supports [Stable Diffusion](../api/pipelines/stable_diffusion/overview), [Stable Diffusion XL](../api/pipelines/stable_diffusion/stable_diffusion_xl), [ControlNet](../api/pipelines/controlnet), [Kandinsky 2.1](../api/pipelines/kandinsky), [Kandinsky 2.2](../api/pipelines/kandinsky_v22), and [DeepFloyd IF](../api/pipelines/deepfloyd_if) checkpoints.
|
||||||
|
|
||||||
|
If you try to load an unsupported checkpoint, you'll get an error.
|
||||||
|
|
||||||
```py
|
```py
|
||||||
from diffusers import AutoPipelineForImage2Image
|
from diffusers import AutoPipelineForImage2Image
|
||||||
@@ -117,54 +121,3 @@ pipeline = AutoPipelineForImage2Image.from_pretrained(
|
|||||||
)
|
)
|
||||||
"ValueError: AutoPipeline can't find a pipeline linked to ShapEImg2ImgPipeline for None"
|
"ValueError: AutoPipeline can't find a pipeline linked to ShapEImg2ImgPipeline for None"
|
||||||
```
|
```
|
||||||
|
|
||||||
## Use multiple pipelines
|
|
||||||
|
|
||||||
For some workflows or if you're loading many pipelines, it is more memory-efficient to reuse the same components from a checkpoint instead of reloading them which would unnecessarily consume additional memory. For example, if you're using a checkpoint for text-to-image and you want to use it again for image-to-image, use the [`~AutoPipelineForImage2Image.from_pipe`] method. This method creates a new pipeline from the components of a previously loaded pipeline at no additional memory cost.
|
|
||||||
|
|
||||||
The [`~AutoPipelineForImage2Image.from_pipe`] method detects the original pipeline class and maps it to the new pipeline class corresponding to the task you want to do. For example, if you load a `"stable-diffusion"` class pipeline for text-to-image:
|
|
||||||
|
|
||||||
```py
|
|
||||||
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
|
|
||||||
import torch
|
|
||||||
|
|
||||||
pipeline_text2img = AutoPipelineForText2Image.from_pretrained(
|
|
||||||
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
|
|
||||||
)
|
|
||||||
print(type(pipeline_text2img))
|
|
||||||
"<class 'diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline'>"
|
|
||||||
```
|
|
||||||
|
|
||||||
Then [`~AutoPipelineForImage2Image.from_pipe`] maps the original `"stable-diffusion"` pipeline class to [`StableDiffusionImg2ImgPipeline`]:
|
|
||||||
|
|
||||||
```py
|
|
||||||
pipeline_img2img = AutoPipelineForImage2Image.from_pipe(pipeline_text2img)
|
|
||||||
print(type(pipeline_img2img))
|
|
||||||
"<class 'diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline'>"
|
|
||||||
```
|
|
||||||
|
|
||||||
If you passed an optional argument - like disabling the safety checker - to the original pipeline, this argument is also passed on to the new pipeline:
|
|
||||||
|
|
||||||
```py
|
|
||||||
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
|
|
||||||
import torch
|
|
||||||
|
|
||||||
pipeline_text2img = AutoPipelineForText2Image.from_pretrained(
|
|
||||||
"runwayml/stable-diffusion-v1-5",
|
|
||||||
torch_dtype=torch.float16,
|
|
||||||
use_safetensors=True,
|
|
||||||
requires_safety_checker=False,
|
|
||||||
).to("cuda")
|
|
||||||
|
|
||||||
pipeline_img2img = AutoPipelineForImage2Image.from_pipe(pipeline_text2img)
|
|
||||||
print(pipeline_img2img.config.requires_safety_checker)
|
|
||||||
"False"
|
|
||||||
```
|
|
||||||
|
|
||||||
You can overwrite any of the arguments and even configuration from the original pipeline if you want to change the behavior of the new pipeline. For example, to turn the safety checker back on and add the `strength` argument:
|
|
||||||
|
|
||||||
```py
|
|
||||||
pipeline_img2img = AutoPipelineForImage2Image.from_pipe(pipeline_text2img, requires_safety_checker=True, strength=0.3)
|
|
||||||
print(pipeline_img2img.config.requires_safety_checker)
|
|
||||||
"True"
|
|
||||||
```
|
|
||||||
|
|||||||
@@ -260,7 +260,7 @@ Then, you'll need a way to evaluate the model. For evaluation, you can use the [
|
|||||||
... # The default pipeline output type is `List[PIL.Image]`
|
... # The default pipeline output type is `List[PIL.Image]`
|
||||||
... images = pipeline(
|
... images = pipeline(
|
||||||
... batch_size=config.eval_batch_size,
|
... batch_size=config.eval_batch_size,
|
||||||
... generator=torch.manual_seed(config.seed),
|
... generator=torch.Generator(device='cpu').manual_seed(config.seed), # Use a separate torch generator to avoid rewinding the random state of the main training loop
|
||||||
... ).images
|
... ).images
|
||||||
|
|
||||||
... # Make a grid out of the images
|
... # Make a grid out of the images
|
||||||
|
|||||||
@@ -34,13 +34,10 @@ Install [PyTorch nightly](https://pytorch.org/) to benefit from the latest and f
|
|||||||
pip3 install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu121
|
pip3 install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu121
|
||||||
```
|
```
|
||||||
|
|
||||||
<Tip>
|
> [!TIP]
|
||||||
|
> The results reported below are from an 80GB 400W A100 with its clock rate set to the maximum.
|
||||||
|
> If you're interested in the full benchmarking code, take a look at [huggingface/diffusion-fast](https://github.com/huggingface/diffusion-fast).
|
||||||
|
|
||||||
The results reported below are from a 80GB 400W A100 with its clock rate set to the maximum. <br>
|
|
||||||
|
|
||||||
If you're interested in the full benchmarking code, take a look at [huggingface/diffusion-fast](https://github.com/huggingface/diffusion-fast).
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
## Baseline
|
## Baseline
|
||||||
|
|
||||||
@@ -170,6 +167,9 @@ Using SDPA attention and compiling both the UNet and VAE cuts the latency from 3
|
|||||||
<img src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/progressive-acceleration-sdxl/SDXL%2C_Batch_Size%3A_1%2C_Steps%3A_30_3.png" width=500>
|
<img src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/progressive-acceleration-sdxl/SDXL%2C_Batch_Size%3A_1%2C_Steps%3A_30_3.png" width=500>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> From PyTorch 2.3.1, you can control the caching behavior of `torch.compile()`. This is particularly beneficial for compilation modes like `"max-autotune"` which performs a grid-search over several compilation flags to find the optimal configuration. Learn more in the [Compile Time Caching in torch.compile](https://pytorch.org/tutorials/recipes/torch_compile_caching_tutorial.html) tutorial.
|
||||||
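As a rough sketch of what this caching control can look like (the environment variables below are PyTorch inductor settings rather than Diffusers APIs, and the cache directory path is just an example), you can enable the FX graph cache and point it at a persistent directory before triggering compilation:

```py
import os

# Enable torch.compile's FX graph cache and keep it in a persistent directory so
# "max-autotune" results can be reused across runs (illustrative settings only).
os.environ["TORCHINDUCTOR_FX_GRAPH_CACHE"] = "1"
os.environ["TORCHINDUCTOR_CACHE_DIR"] = "./inductor_cache"
```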
|
|
||||||
### Prevent graph breaks
|
### Prevent graph breaks
|
||||||
|
|
||||||
Specifying `fullgraph=True` ensures there are no graph breaks in the underlying model to take full advantage of `torch.compile` without any performance degradation. For the UNet and VAE, this means changing how you access the return variables.
|
Specifying `fullgraph=True` ensures there are no graph breaks in the underlying model to take full advantage of `torch.compile` without any performance degradation. For the UNet and VAE, this means changing how you access the return variables.
|
||||||
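As a sketch of what this change looks like inside a denoising loop (assuming `unet`, `latent_model_input`, `timestep`, and `prompt_embeds` are already defined), index into the returned tuple instead of accessing `.sample`:

```py
import torch

# Compile the full forward pass; return_dict=False makes the UNet return a plain
# tuple, which avoids the graph break introduced by the dataclass output.
unet = torch.compile(unet, mode="max-autotune", fullgraph=True)

noise_pred = unet(
    latent_model_input,
    timestep=timestep,
    encoder_hidden_states=prompt_embeds,
    return_dict=False,
)[0]
```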
|
|||||||
docs/source/en/tutorials/inference_with_big_models.md (new file, 139 additions)
@@ -0,0 +1,139 @@
|
|||||||
|
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License.
|
||||||
|
-->
|
||||||
|
|
||||||
|
# Working with big models
|
||||||
|
|
||||||
|
A modern diffusion model, like [Stable Diffusion XL (SDXL)](../using-diffusers/sdxl), is not just a single model, but a collection of multiple models. SDXL has four different model-level components:
|
||||||
|
|
||||||
|
* A variational autoencoder (VAE)
|
||||||
|
* Two text encoders
|
||||||
|
* A UNet for denoising
|
||||||
|
|
||||||
|
Usually, the text encoders and the denoiser are much larger compared to the VAE.
|
||||||
|
|
||||||
|
As models get bigger and better, it’s possible your model is so big that even a single copy won’t fit in memory. But that doesn’t mean it can’t be loaded. If you have more than one GPU, there is more memory available to store your model. In this case, it’s better to split your model checkpoint into several smaller *checkpoint shards*.
|
||||||
|
|
||||||
|
When a text encoder checkpoint has multiple shards, like [T5-xxl for SD3](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers/tree/main/text_encoder_3), it is automatically handled by the [Transformers](https://huggingface.co/docs/transformers/index) library as it is a required dependency of Diffusers when using the [`StableDiffusion3Pipeline`]. More specifically, Transformers will automatically handle the loading of multiple shards within the requested model class and get it ready so that inference can be performed.
|
||||||
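In practice, nothing extra is needed on the Diffusers side; a minimal sketch (assuming you have accepted the model license and are authenticated with the Hub) is:

```py
import torch
from diffusers import StableDiffusion3Pipeline

# text_encoder_3 (T5-xxl) is stored as multiple shards on the Hub;
# Transformers reassembles the shards transparently while loading.
pipeline = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16
).to("cuda")
```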
|
|
||||||
|
The denoiser checkpoint can also have multiple shards and supports inference thanks to the [Accelerate](https://huggingface.co/docs/accelerate/index) library.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> Refer to the [Handling big models for inference](https://huggingface.co/docs/accelerate/main/en/concept_guides/big_model_inference) guide for general guidance when working with big models that are hard to fit into memory.
|
||||||
|
|
||||||
|
For example, let's save a sharded checkpoint for the [SDXL UNet](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main/unet):
|
||||||
|
|
||||||
|
```python
|
||||||
|
from diffusers import UNet2DConditionModel
|
||||||
|
|
||||||
|
unet = UNet2DConditionModel.from_pretrained(
|
||||||
|
"stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet"
|
||||||
|
)
|
||||||
|
unet.save_pretrained("sdxl-unet-sharded", max_shard_size="5GB")
|
||||||
|
```
|
||||||
|
|
||||||
|
The size of the fp32 variant of the SDXL UNet checkpoint is ~10.4GB. Set the `max_shard_size` parameter to 5GB to create 3 shards. After saving, you can load them in [`StableDiffusionXLPipeline`]:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from diffusers import UNet2DConditionModel, StableDiffusionXLPipeline
|
||||||
|
import torch
|
||||||
|
|
||||||
|
unet = UNet2DConditionModel.from_pretrained(
|
||||||
|
"sayakpaul/sdxl-unet-sharded", torch_dtype=torch.float16
|
||||||
|
)
|
||||||
|
pipeline = StableDiffusionXLPipeline.from_pretrained(
|
||||||
|
"stabilityai/stable-diffusion-xl-base-1.0", unet=unet, torch_dtype=torch.float16
|
||||||
|
).to("cuda")
|
||||||
|
|
||||||
|
image = pipeline("a cute dog running on the grass", num_inference_steps=30).images[0]
|
||||||
|
image.save("dog.png")
|
||||||
|
```
|
||||||
|
|
||||||
|
If placing all the model-level components on the GPU at once is not feasible, use [`~DiffusionPipeline.enable_model_cpu_offload`] to help you:
|
||||||
|
|
||||||
|
```diff
|
||||||
|
- pipeline.to("cuda")
|
||||||
|
+ pipeline.enable_model_cpu_offload()
|
||||||
|
```
|
||||||
|
|
||||||
|
In general, we recommend sharding when a checkpoint is more than 5GB (in fp32).
|
||||||
|
|
||||||
|
## Device placement
|
||||||
|
|
||||||
|
On distributed setups, you can run inference across multiple GPUs with Accelerate.
|
||||||
|
|
||||||
|
> [!WARNING]
|
||||||
|
> This feature is experimental and its APIs might change in the future.
|
||||||
|
|
||||||
|
With Accelerate, you can use the `device_map` to determine how to distribute the models of a pipeline across multiple devices. This is useful in situations where you have more than one GPU.
|
||||||
|
|
||||||
|
For example, if you have two 8GB GPUs, then using [`~DiffusionPipeline.enable_model_cpu_offload`] may not work so well because:
|
||||||
|
|
||||||
|
* it only works on a single GPU
|
||||||
|
* a single model might not fit on a single GPU ([`~DiffusionPipeline.enable_sequential_cpu_offload`] might work but it will be extremely slow and it is also limited to a single GPU)
|
||||||
|
|
||||||
|
To make use of both GPUs, you can use the "balanced" device placement strategy which splits the models across all available GPUs.
|
||||||
|
|
||||||
|
> [!WARNING]
|
||||||
|
> Only the "balanced" strategy is supported at the moment, and we plan to support additional mapping strategies in the future.
|
||||||
|
|
||||||
|
```diff
|
||||||
|
from diffusers import DiffusionPipeline
|
||||||
|
import torch
|
||||||
|
|
||||||
|
pipeline = DiffusionPipeline.from_pretrained(
|
||||||
|
- "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True,
|
||||||
|
+ "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, device_map="balanced"
|
||||||
|
)
|
||||||
|
image = pipeline("a dog").images[0]
|
||||||
|
image
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also pass a dictionary to enforce the maximum GPU memory that can be used on each device:
|
||||||
|
|
||||||
|
```diff
|
||||||
|
from diffusers import DiffusionPipeline
|
||||||
|
import torch
|
||||||
|
|
||||||
|
max_memory = {0:"1GB", 1:"1GB"}
|
||||||
|
pipeline = DiffusionPipeline.from_pretrained(
|
||||||
|
"runwayml/stable-diffusion-v1-5",
|
||||||
|
torch_dtype=torch.float16,
|
||||||
|
use_safetensors=True,
|
||||||
|
device_map="balanced",
|
||||||
|
+ max_memory=max_memory
|
||||||
|
)
|
||||||
|
image = pipeline("a dog").images[0]
|
||||||
|
image
|
||||||
|
```
|
||||||
|
|
||||||
|
If a device is not present in `max_memory`, then it will be completely ignored and will not participate in the device placement.
|
||||||
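For instance, listing only GPU 0 in `max_memory` keeps any other GPU out of the placement entirely (a small sketch reusing the pipeline call from above; the memory values are arbitrary):

```py
# GPU 1 is not listed here, so it is ignored during device placement
max_memory = {0: "8GB"}

pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
    use_safetensors=True,
    device_map="balanced",
    max_memory=max_memory,
)
```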
|
|
||||||
|
By default, Diffusers uses the maximum memory of all devices. If the models don't fit on the GPUs, they are offloaded to the CPU. If the CPU doesn't have enough memory, then you might see an error. In that case, you can fall back to [`~DiffusionPipeline.enable_sequential_cpu_offload`] and [`~DiffusionPipeline.enable_model_cpu_offload`].
|
||||||
|
|
||||||
|
Call [`~DiffusionPipeline.reset_device_map`] to reset the `device_map` of a pipeline. This is also necessary if you want to use methods like `to()`, [`~DiffusionPipeline.enable_sequential_cpu_offload`], and [`~DiffusionPipeline.enable_model_cpu_offload`] on a pipeline that was device-mapped.
|
||||||
|
|
||||||
|
```py
|
||||||
|
pipeline.reset_device_map()
|
||||||
|
```
|
||||||
|
|
||||||
|
Once a pipeline has been device-mapped, you can also access its device map via `hf_device_map`:
|
||||||
|
|
||||||
|
```py
|
||||||
|
print(pipeline.hf_device_map)
|
||||||
|
```
|
||||||
|
|
||||||
|
An example device map would look like so:
|
||||||
|
|
||||||
|
|
||||||
|
```bash
|
||||||
|
{'unet': 1, 'vae': 1, 'safety_checker': 0, 'text_encoder': 0}
|
||||||
|
```
|
||||||
@@ -19,13 +19,74 @@ The denoising loop of a pipeline can be modified with custom defined functions u
|
|||||||
|
|
||||||
This guide will demonstrate how callbacks work by walking through a few features you can implement with them.
|
This guide will demonstrate how callbacks work by walking through a few features you can implement with them.
|
||||||
|
|
||||||
|
## Official callbacks
|
||||||
|
|
||||||
|
We provide a list of callbacks you can plug into an existing pipeline to modify the denoising loop. This is the current list of official callbacks:
|
||||||
|
|
||||||
|
- `SDCFGCutoffCallback`: Disables the CFG after a certain number of steps for all SD 1.5 pipelines, including text-to-image, image-to-image, inpaint, and controlnet.
|
||||||
|
- `SDXLCFGCutoffCallback`: Disables the CFG after a certain number of steps for all SDXL pipelines, including text-to-image, image-to-image, inpaint, and controlnet.
|
||||||
|
- `IPAdapterScaleCutoffCallback`: Disables the IP Adapter after a certain number of steps for all pipelines supporting IP-Adapter.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> If you want to add a new official callback, feel free to open a [feature request](https://github.com/huggingface/diffusers/issues/new/choose) or [submit a PR](https://huggingface.co/docs/diffusers/main/en/conceptual/contribution#how-to-open-a-pr).
|
||||||
|
|
||||||
|
To set up a callback, you need to specify the number of denoising steps after which the callback comes into effect. You can do so by using either one of these two arguments:
|
||||||
|
|
||||||
|
- `cutoff_step_ratio`: Float number with the ratio of the steps.
|
||||||
|
- `cutoff_step_index`: Integer number with the exact number of the step.
|
||||||
|
|
||||||
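The same two arguments apply to the other official callbacks as well; for example, a minimal sketch with `IPAdapterScaleCutoffCallback` (assuming a pipeline that already has an IP-Adapter loaded) would be:

```py
from diffusers.callbacks import IPAdapterScaleCutoffCallback

# stop applying the IP-Adapter after half of the denoising steps; pass it to the
# pipeline through `callback_on_step_end`, exactly as in the example below
ip_adapter_callback = IPAdapterScaleCutoffCallback(cutoff_step_ratio=0.5)
```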
|
```python
|
||||||
|
import torch
|
||||||
|
|
||||||
|
from diffusers import DPMSolverMultistepScheduler, StableDiffusionXLPipeline
|
||||||
|
from diffusers.callbacks import SDXLCFGCutoffCallback
|
||||||
|
|
||||||
|
|
||||||
|
callback = SDXLCFGCutoffCallback(cutoff_step_ratio=0.4)
|
||||||
|
# can also be used with cutoff_step_index
|
||||||
|
# callback = SDXLCFGCutoffCallback(cutoff_step_ratio=None, cutoff_step_index=10)
|
||||||
|
|
||||||
|
pipeline = StableDiffusionXLPipeline.from_pretrained(
|
||||||
|
"stabilityai/stable-diffusion-xl-base-1.0",
|
||||||
|
torch_dtype=torch.float16,
|
||||||
|
variant="fp16",
|
||||||
|
).to("cuda")
|
||||||
|
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, use_karras_sigmas=True)
|
||||||
|
|
||||||
|
prompt = "a sports car at the road, best quality, high quality, high detail, 8k resolution"
|
||||||
|
|
||||||
|
generator = torch.Generator(device="cpu").manual_seed(2628670641)
|
||||||
|
|
||||||
|
out = pipeline(
|
||||||
|
prompt=prompt,
|
||||||
|
negative_prompt="",
|
||||||
|
guidance_scale=6.5,
|
||||||
|
num_inference_steps=25,
|
||||||
|
generator=generator,
|
||||||
|
callback_on_step_end=callback,
|
||||||
|
)
|
||||||
|
|
||||||
|
out.images[0].save("official_callback.png")
|
||||||
|
```
|
||||||
|
|
||||||
|
<div class="flex gap-4">
|
||||||
|
<div>
|
||||||
|
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/without_cfg_callback.png" alt="generated image of a sports car at the road" />
|
||||||
|
<figcaption class="mt-2 text-center text-sm text-gray-500">without SDXLCFGCutoffCallback</figcaption>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/with_cfg_callback.png" alt="generated image of a a sports car at the road with cfg callback" />
|
||||||
|
<figcaption class="mt-2 text-center text-sm text-gray-500">with SDXLCFGCutoffCallback</figcaption>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
## Dynamic classifier-free guidance
|
## Dynamic classifier-free guidance
|
||||||
|
|
||||||
Dynamic classifier-free guidance (CFG) is a feature that allows you to disable CFG after a certain number of inference steps which can help you save compute with minimal cost to performance. The callback function for this should have the following arguments:
|
Dynamic classifier-free guidance (CFG) is a feature that allows you to disable CFG after a certain number of inference steps which can help you save compute with minimal cost to performance. The callback function for this should have the following arguments:
|
||||||
|
|
||||||
* `pipeline` (or the pipeline instance) provides access to important properties such as `num_timesteps` and `guidance_scale`. You can modify these properties by updating the underlying attributes. For this example, you'll disable CFG by setting `pipeline._guidance_scale=0.0`.
|
- `pipeline` (or the pipeline instance) provides access to important properties such as `num_timesteps` and `guidance_scale`. You can modify these properties by updating the underlying attributes. For this example, you'll disable CFG by setting `pipeline._guidance_scale=0.0`.
|
||||||
* `step_index` and `timestep` tell you where you are in the denoising loop. Use `step_index` to turn off CFG after reaching 40% of `num_timesteps`.
|
- `step_index` and `timestep` tell you where you are in the denoising loop. Use `step_index` to turn off CFG after reaching 40% of `num_timesteps`.
|
||||||
* `callback_kwargs` is a dict that contains tensor variables you can modify during the denoising loop. It only includes variables specified in the `callback_on_step_end_tensor_inputs` argument, which is passed to the pipeline's `__call__` method. Different pipelines may use different sets of variables, so please check a pipeline's `_callback_tensor_inputs` attribute for the list of variables you can modify. Some common variables include `latents` and `prompt_embeds`. For this function, change the batch size of `prompt_embeds` after setting `guidance_scale=0.0` in order for it to work properly.
|
- `callback_kwargs` is a dict that contains tensor variables you can modify during the denoising loop. It only includes variables specified in the `callback_on_step_end_tensor_inputs` argument, which is passed to the pipeline's `__call__` method. Different pipelines may use different sets of variables, so please check a pipeline's `_callback_tensor_inputs` attribute for the list of variables you can modify. Some common variables include `latents` and `prompt_embeds`. For this function, change the batch size of `prompt_embeds` after setting `guidance_scale=0.0` in order for it to work properly.
|
||||||
|
|
||||||
Your callback function should look something like this:
|
Your callback function should look something like this:
|
||||||
|
|
||||||
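A minimal sketch, assuming a pipeline whose `_callback_tensor_inputs` includes `prompt_embeds`:

```py
def callback_dynamic_cfg(pipe, step_index, timestep, callback_kwargs):
    # turn off classifier-free guidance after 40% of the denoising steps
    if step_index == int(pipe.num_timesteps * 0.4):
        prompt_embeds = callback_kwargs["prompt_embeds"]
        # keep only the conditional half since CFG is now disabled
        prompt_embeds = prompt_embeds.chunk(2)[-1]

        pipe._guidance_scale = 0.0
        callback_kwargs["prompt_embeds"] = prompt_embeds
    return callback_kwargs
```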
|
|||||||
@@ -1,184 +0,0 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
|
||||||
the License. You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
|
||||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
|
||||||
specific language governing permissions and limitations under the License.
|
|
||||||
-->
|
|
||||||
|
|
||||||
# Contribute a community pipeline
|
|
||||||
|
|
||||||
<Tip>
|
|
||||||
|
|
||||||
💡 Take a look at GitHub Issue [#841](https://github.com/huggingface/diffusers/issues/841) for more context about why we're adding community pipelines to help everyone easily share their work without being slowed down.
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
Community pipelines allow you to add any additional features you'd like on top of the [`DiffusionPipeline`]. The main benefit of building on top of the `DiffusionPipeline` is anyone can load and use your pipeline by only adding one more argument, making it super easy for the community to access.
|
|
||||||
|
|
||||||
This guide will show you how to create a community pipeline and explain how they work. To keep things simple, you'll create a "one-step" pipeline where the `UNet` does a single forward pass and calls the scheduler once.
|
|
||||||
|
|
||||||
## Initialize the pipeline
|
|
||||||
|
|
||||||
You should start by creating a `one_step_unet.py` file for your community pipeline. In this file, create a pipeline class that inherits from the [`DiffusionPipeline`] to be able to load model weights and the scheduler configuration from the Hub. The one-step pipeline needs a `UNet` and a scheduler, so you'll need to add these as arguments to the `__init__` function:
|
|
||||||
|
|
||||||
```python
|
|
||||||
from diffusers import DiffusionPipeline
|
|
||||||
import torch
|
|
||||||
|
|
||||||
class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
|
|
||||||
def __init__(self, unet, scheduler):
|
|
||||||
super().__init__()
|
|
||||||
```
|
|
||||||
|
|
||||||
To ensure your pipeline and its components (`unet` and `scheduler`) can be saved with [`~DiffusionPipeline.save_pretrained`], add them to the `register_modules` function:
|
|
||||||
|
|
||||||
```diff
|
|
||||||
from diffusers import DiffusionPipeline
|
|
||||||
import torch
|
|
||||||
|
|
||||||
class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
|
|
||||||
def __init__(self, unet, scheduler):
|
|
||||||
super().__init__()
|
|
||||||
|
|
||||||
+ self.register_modules(unet=unet, scheduler=scheduler)
|
|
||||||
```
|
|
||||||
|
|
||||||
Cool, the `__init__` step is done and you can move to the forward pass now! 🔥
|
|
||||||
|
|
||||||
## Define the forward pass
|
|
||||||
|
|
||||||
In the forward pass, which we recommend defining as `__call__`, you have complete creative freedom to add whatever feature you'd like. For our amazing one-step pipeline, create a random image and only call the `unet` and `scheduler` once by setting `timestep=1`:
|
|
||||||
|
|
||||||
```diff
|
|
||||||
from diffusers import DiffusionPipeline
|
|
||||||
import torch
|
|
||||||
|
|
||||||
class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
|
|
||||||
def __init__(self, unet, scheduler):
|
|
||||||
super().__init__()
|
|
||||||
|
|
||||||
self.register_modules(unet=unet, scheduler=scheduler)
|
|
||||||
|
|
||||||
+ def __call__(self):
|
|
||||||
+ image = torch.randn(
|
|
||||||
+ (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
|
|
||||||
+ )
|
|
||||||
+ timestep = 1
|
|
||||||
|
|
||||||
+ model_output = self.unet(image, timestep).sample
|
|
||||||
+ scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
|
|
||||||
|
|
||||||
+ return scheduler_output
|
|
||||||
```
|
|
||||||
|
|
||||||
That's it! 🚀 You can now run this pipeline by passing a `unet` and `scheduler` to it:
|
|
||||||
|
|
||||||
```python
|
|
||||||
from diffusers import DDPMScheduler, UNet2DModel
|
|
||||||
|
|
||||||
scheduler = DDPMScheduler()
|
|
||||||
unet = UNet2DModel()
|
|
||||||
|
|
||||||
pipeline = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)
|
|
||||||
|
|
||||||
output = pipeline()
|
|
||||||
```
|
|
||||||
|
|
||||||
But what's even better is you can load pre-existing weights into the pipeline if the pipeline structure is identical. For example, you can load the [`google/ddpm-cifar10-32`](https://huggingface.co/google/ddpm-cifar10-32) weights into the one-step pipeline:
|
|
||||||
|
|
||||||
```python
|
|
||||||
pipeline = UnetSchedulerOneForwardPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True)
|
|
||||||
|
|
||||||
output = pipeline()
|
|
||||||
```
|
|
||||||
|
|
||||||
## Share your pipeline
|
|
||||||
|
|
||||||
Open a Pull Request on the 🧨 Diffusers [repository](https://github.com/huggingface/diffusers) to add your awesome pipeline in `one_step_unet.py` to the [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) subfolder.
|
|
||||||
|
|
||||||
Once it is merged, anyone with `diffusers >= 0.4.0` installed can use this pipeline magically 🪄 by specifying it in the `custom_pipeline` argument:
|
|
||||||
|
|
||||||
```python
|
|
||||||
from diffusers import DiffusionPipeline
|
|
||||||
|
|
||||||
pipe = DiffusionPipeline.from_pretrained(
|
|
||||||
"google/ddpm-cifar10-32", custom_pipeline="one_step_unet", use_safetensors=True
|
|
||||||
)
|
|
||||||
pipe()
|
|
||||||
```
|
|
||||||
|
|
||||||
Another way to share your community pipeline is to upload the `one_step_unet.py` file directly to your preferred [model repository](https://huggingface.co/docs/hub/models-uploading) on the Hub. Instead of specifying the `one_step_unet.py` file, pass the model repository id to the `custom_pipeline` argument:
|
|
||||||
|
|
||||||
```python
|
|
||||||
from diffusers import DiffusionPipeline
|
|
||||||
|
|
||||||
pipeline = DiffusionPipeline.from_pretrained(
|
|
||||||
"google/ddpm-cifar10-32", custom_pipeline="stevhliu/one_step_unet", use_safetensors=True
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
Take a look at the following table to compare the two sharing workflows to help you decide the best option for you:
|
|
||||||
|
|
||||||
| | GitHub community pipeline | HF Hub community pipeline |
|
|
||||||
|----------------|------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------|
|
|
||||||
| usage | same | same |
|
|
||||||
| review process | open a Pull Request on GitHub and undergo a review process from the Diffusers team before merging; may be slower | upload directly to a Hub repository without any review; this is the fastest workflow |
|
|
||||||
| visibility | included in the official Diffusers repository and documentation | included on your HF Hub profile and relies on your own usage/promotion to gain visibility |
|
|
||||||
|
|
||||||
<Tip>
|
|
||||||
|
|
||||||
💡 You can use whatever package you want in your community pipeline file - as long as the user has it installed, everything will work fine. Make sure you have one and only one pipeline class that inherits from `DiffusionPipeline` because this is automatically detected.
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
## How do community pipelines work?
|
|
||||||
|
|
||||||
A community pipeline is a class that inherits from [`DiffusionPipeline`] which means:
|
|
||||||
|
|
||||||
- It can be loaded with the [`custom_pipeline`] argument.
|
|
||||||
- The model weights and scheduler configuration are loaded from [`pretrained_model_name_or_path`].
|
|
||||||
- The code that implements a feature in the community pipeline is defined in a `pipeline.py` file.
|
|
||||||
|
|
||||||
Sometimes you can't load all the pipeline components weights from an official repository. In this case, the other components should be passed directly to the pipeline:
|
|
||||||
|
|
||||||
```python
|
|
||||||
from diffusers import DDIMScheduler, DiffusionPipeline
|
|
||||||
from transformers import CLIPImageProcessor, CLIPModel
import torch
|
|
||||||
|
|
||||||
model_id = "CompVis/stable-diffusion-v1-4"
|
|
||||||
clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
|
|
||||||
|
|
||||||
feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id)
|
|
||||||
clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)
|
|
||||||
|
|
||||||
pipeline = DiffusionPipeline.from_pretrained(
|
|
||||||
model_id,
|
|
||||||
custom_pipeline="clip_guided_stable_diffusion",
|
|
||||||
clip_model=clip_model,
|
|
||||||
feature_extractor=feature_extractor,
|
|
||||||
scheduler=DDIMScheduler.from_pretrained(model_id, subfolder="scheduler"),
|
|
||||||
torch_dtype=torch.float16,
|
|
||||||
use_safetensors=True,
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
The magic behind community pipelines is contained in the following code. It allows the community pipeline to be loaded from GitHub or the Hub, and it'll be available to all 🧨 Diffusers packages.
|
|
||||||
|
|
||||||
```python
|
|
||||||
# 2. Load the pipeline class, if using custom module then load it from the Hub
|
|
||||||
# if we load from explicit class, let's use it
|
|
||||||
if custom_pipeline is not None:
|
|
||||||
pipeline_class = get_class_from_dynamic_module(
|
|
||||||
custom_pipeline, module_file=CUSTOM_PIPELINE_FILE_NAME, cache_dir=custom_pipeline
|
|
||||||
)
|
|
||||||
elif cls != DiffusionPipeline:
|
|
||||||
pipeline_class = cls
|
|
||||||
else:
|
|
||||||
diffusers_module = importlib.import_module(cls.__module__.split(".")[0])
|
|
||||||
pipeline_class = getattr(diffusers_module, config_dict["_class_name"])
|
|
||||||
```
|
|
||||||
@@ -1,58 +0,0 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
|
||||||
the License. You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
|
||||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
|
||||||
specific language governing permissions and limitations under the License.
|
|
||||||
-->
|
|
||||||
|
|
||||||
# Control image brightness
|
|
||||||
|
|
||||||
The Stable Diffusion pipeline is mediocre at generating images that are either very bright or dark as explained in the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) paper. The solutions proposed in the paper are currently implemented in the [`DDIMScheduler`] which you can use to improve the lighting in your images.
|
|
||||||
|
|
||||||
<Tip>
|
|
||||||
|
|
||||||
💡 Take a look at the paper linked above for more details about the proposed solutions!
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
One of the solutions is to train a model with *v prediction* and *v loss*. Add the following flag to the [`train_text_to_image.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) or [`train_text_to_image_lora.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) scripts to enable `v_prediction`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
--prediction_type="v_prediction"
|
|
||||||
```
|
|
||||||
|
|
||||||
For example, let's use the [`ptx0/pseudo-journey-v2`](https://huggingface.co/ptx0/pseudo-journey-v2) checkpoint which has been finetuned with `v_prediction`.
|
|
||||||
|
|
||||||
Next, configure the following parameters in the [`DDIMScheduler`]:
|
|
||||||
|
|
||||||
1. `rescale_betas_zero_snr=True`, rescales the noise schedule to zero terminal signal-to-noise ratio (SNR)
|
|
||||||
2. `timestep_spacing="trailing"`, starts sampling from the last timestep
|
|
||||||
|
|
||||||
```py
|
|
||||||
from diffusers import DiffusionPipeline, DDIMScheduler
|
|
||||||
|
|
||||||
pipeline = DiffusionPipeline.from_pretrained("ptx0/pseudo-journey-v2", use_safetensors=True)
|
|
||||||
|
|
||||||
# switch the scheduler in the pipeline to use the DDIMScheduler
|
|
||||||
pipeline.scheduler = DDIMScheduler.from_config(
|
|
||||||
pipeline.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing"
|
|
||||||
)
|
|
||||||
pipeline.to("cuda")
|
|
||||||
```
|
|
||||||
|
|
||||||
Finally, in your call to the pipeline, set `guidance_rescale` to prevent overexposure:
|
|
||||||
|
|
||||||
```py
|
|
||||||
prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
|
|
||||||
image = pipeline(prompt, guidance_rescale=0.7).images[0]
|
|
||||||
image
|
|
||||||
```
|
|
||||||
|
|
||||||
<div class="flex justify-center">
|
|
||||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/zero_snr.png"/>
|
|
||||||
</div>
|
|
||||||
@@ -1,119 +0,0 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
|
||||||
the License. You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
|
||||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
|
||||||
specific language governing permissions and limitations under the License.
|
|
||||||
-->
|
|
||||||
|
|
||||||
# Community pipelines

[[open-in-colab]]

<Tip>

For more context about the design choices behind community pipelines, please have a look at [this issue](https://github.com/huggingface/diffusers/issues/841).

</Tip>

Community pipelines allow you to get creative and build your own unique pipelines to share with the community. You can find all community pipelines in the [diffusers/examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) folder along with inference and training examples for how to use them. This guide showcases some of the community pipelines and hopefully it'll inspire you to create your own (feel free to open a PR with your own pipeline and we will merge it!).

To load a community pipeline, use the `custom_pipeline` argument in [`DiffusionPipeline`] to specify one of the files in [diffusers/examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community):

```py
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", custom_pipeline="filename_in_the_community_folder", use_safetensors=True
)
```

If a community pipeline doesn't work as expected, please open a GitHub issue and mention the author.

You can learn more about community pipelines in the [load community pipelines](custom_pipeline_overview) and [contribute a community pipeline](contribute_pipeline) how-to guides.

## Multilingual Stable Diffusion

The multilingual Stable Diffusion pipeline uses a pretrained [XLM-RoBERTa](https://huggingface.co/papluca/xlm-roberta-base-language-detection) model to identify a language and the [mBART-large-50](https://huggingface.co/facebook/mbart-large-50-many-to-one-mmt) model to handle the translation. This allows you to generate images from text in 20 languages.

```py
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import make_image_grid
from transformers import (
    pipeline,
    MBart50TokenizerFast,
    MBartForConditionalGeneration,
)

device = "cuda" if torch.cuda.is_available() else "cpu"
device_dict = {"cuda": 0, "cpu": -1}

# add language detection pipeline
language_detection_model_ckpt = "papluca/xlm-roberta-base-language-detection"
language_detection_pipeline = pipeline("text-classification",
                                       model=language_detection_model_ckpt,
                                       device=device_dict[device])

# add model for language translation
translation_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
translation_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-one-mmt").to(device)

diffuser_pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="multilingual_stable_diffusion",
    detection_pipeline=language_detection_pipeline,
    translation_model=translation_model,
    translation_tokenizer=translation_tokenizer,
    torch_dtype=torch.float16,
)

diffuser_pipeline.enable_attention_slicing()
diffuser_pipeline = diffuser_pipeline.to(device)

prompt = ["a photograph of an astronaut riding a horse",
          "Una casa en la playa",
          "Ein Hund, der Orange isst",
          "Un restaurant parisien"]

images = diffuser_pipeline(prompt).images
make_image_grid(images, rows=2, cols=2)
```

<div class="flex justify-center">
    <img src="https://user-images.githubusercontent.com/4313860/198328706-295824a4-9856-4ce5-8e66-278ceb42fd29.png"/>
</div>

## MagicMix

[MagicMix](https://huggingface.co/papers/2210.16056) is a pipeline that can mix an image and text prompt to generate a new image that preserves the image structure. The `mix_factor` determines how much influence the prompt has on the layout generation, `kmin` controls the number of steps during the content generation process, and `kmax` determines how much information is kept in the layout of the original image.

```py
from diffusers import DiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image, make_image_grid

pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="magic_mix",
    scheduler=DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler"),
).to("cuda")

img = load_image("https://user-images.githubusercontent.com/59410571/209578593-141467c7-d831-4792-8b9a-b17dc5e47816.jpg")
mix_img = pipeline(img, prompt="bed", kmin=0.3, kmax=0.5, mix_factor=0.5)
make_image_grid([img, mix_img], rows=1, cols=2)
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://user-images.githubusercontent.com/59410571/209578593-141467c7-d831-4792-8b9a-b17dc5e47816.jpg" />
    <figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://user-images.githubusercontent.com/59410571/209578602-70f323fa-05b7-4dd6-b055-e40683e37914.jpg" />
    <figcaption class="mt-2 text-center text-sm text-gray-500">image and text prompt mix</figcaption>
  </div>
</div>
## Community pipelines

> [!TIP]
> Take a look at GitHub Issue [#841](https://github.com/huggingface/diffusers/issues/841) for more context about why we're adding community pipelines to help everyone easily share their work without being slowed down.

Community pipelines are any [`DiffusionPipeline`] class that is different from the original paper implementation (for example, the [`StableDiffusionControlNetPipeline`] corresponds to the [Text-to-Image Generation with ControlNet Conditioning](https://arxiv.org/abs/2302.05543) paper). They provide additional functionality or extend the original implementation of a pipeline.

There are many cool community pipelines like [Marigold Depth Estimation](https://github.com/huggingface/diffusers/tree/main/examples/community#marigold-depth-estimation) or [InstantID](https://github.com/huggingface/diffusers/tree/main/examples/community#instantid-pipeline), and you can find all the official community pipelines [here](https://github.com/huggingface/diffusers/tree/main/examples/community).

There are two types of community pipelines: those stored on the Hugging Face Hub and those stored in the Diffusers GitHub repository. Hub pipelines are completely customizable (scheduler, models, pipeline code, etc.) while Diffusers GitHub pipelines are limited to custom pipeline code only.

|                | GitHub community pipeline | HF Hub community pipeline |
|----------------|------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------|
| usage          | same | same |
| review process | open a Pull Request on GitHub and undergo a review process from the Diffusers team before merging; may be slower | upload directly to a Hub repository without any review; this is the fastest workflow |
| visibility     | included in the official Diffusers repository and documentation | included on your HF Hub profile and relies on your own usage/promotion to gain visibility |
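The loading call looks the same for both types; only the value passed to `custom_pipeline` changes. The sketch below is illustrative: the checkpoint and pipeline names are assumed examples (a GitHub pipeline is referenced by its filename in the community folder, a Hub pipeline by a repository id containing a `pipeline.py` file), not recommendations.

```py
import torch
from diffusers import DiffusionPipeline

# GitHub community pipeline: referenced by its filename in diffusers/examples/community
pipeline_github = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion",  # long prompt weighting pipeline from the community folder
    torch_dtype=torch.float16,
).to("cuda")

# Hub community pipeline: referenced by a Hub repository id that contains a pipeline.py file
pipeline_hub = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32",
    custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline",  # assumed example repo with custom pipeline code
    use_safetensors=True,
)
```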
## Example community pipelines

Community pipelines are a really fun and creative way to extend the capabilities of the original pipeline with new and unique features. You can find all community pipelines in the [diffusers/examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) folder with inference and training examples for how to use them.

This section showcases a couple of the community pipelines and hopefully it'll inspire you to create your own (feel free to open a PR for your community pipeline and ping us for a review)!

> [!TIP]
> The [`~DiffusionPipeline.from_pipe`] method is particularly useful for loading community pipelines because many of them don't have pretrained weights and add a feature on top of an existing pipeline like Stable Diffusion or Stable Diffusion XL. You can learn more about the [`~DiffusionPipeline.from_pipe`] method in the [Load with from_pipe](custom_pipeline_overview#load-with-from_pipe) section.
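For example, here is a minimal sketch of reusing the components of an already loaded Stable Diffusion pipeline with [`~DiffusionPipeline.from_pipe`]; the long prompt weighting community pipeline and the checkpoint are used purely as assumed illustrations:

```py
import torch
from diffusers import DiffusionPipeline

# load a regular Stable Diffusion pipeline once
pipe_sd = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# reuse its components for a community pipeline instead of loading everything again
pipe_lpw = DiffusionPipeline.from_pipe(pipe_sd, custom_pipeline="lpw_stable_diffusion").to("cuda")

prompt = "a watercolor painting of a lighthouse on a cliff at sunset, highly detailed"
image = pipe_lpw(prompt, num_inference_steps=25, generator=torch.manual_seed(0)).images[0]
```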
<hfoptions id="community">
<hfoption id="Marigold">

[Marigold](https://marigoldmonodepth.github.io/) is a depth estimation diffusion pipeline that uses the rich existing and inherent visual knowledge in diffusion models. It takes an input image and denoises and decodes it into a depth map. Marigold performs well even on images it hasn't seen before.

```py
import torch
from PIL import Image
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

pipeline = DiffusionPipeline.from_pretrained(
    "prs-eth/marigold-lcm-v1-0",
    custom_pipeline="marigold_depth_estimation",
    torch_dtype=torch.float16,
    variant="fp16",
)

pipeline.to("cuda")
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/community-marigold.png")
output = pipeline(
    image,
    denoising_steps=4,
    ensemble_size=5,
    processing_res=768,
    match_input_res=True,
    batch_size=0,
    seed=33,
    color_map="Spectral",
    show_progress_bar=True,
)
depth_colored: Image.Image = output.depth_colored
depth_colored.save("./depth_colored.png")
```

<div class="flex flex-row gap-4">
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/community-marigold.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption>
  </div>
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/marigold-depth.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">colorized depth image</figcaption>
  </div>
</div>

</hfoption>
<hfoption id="HD-Painter">

[HD-Painter](https://hf.co/papers/2312.14091) is a high-resolution inpainting pipeline. It introduces a *Prompt-Aware Introverted Attention (PAIntA)* layer to better align a prompt with the area to be inpainted, and *Reweighting Attention Score Guidance (RASG)* to keep the latents more prompt-aligned and within their trained domain to generate realistic images.

```py
import torch
from diffusers import DiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

pipeline = DiffusionPipeline.from_pretrained(
    "Lykon/dreamshaper-8-inpainting",
    custom_pipeline="hd_painter"
)
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/hd-painter.jpg")
mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/hd-painter-mask.png")
prompt = "football"
image = pipeline(prompt, init_image, mask_image, use_rasg=True, use_painta=True, generator=torch.manual_seed(0)).images[0]
image
```

<div class="flex flex-row gap-4">
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/hd-painter.jpg"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption>
  </div>
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/hd-painter-output.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
  </div>
</div>

</hfoption>
</hfoptions>
## Community components

Community components allow users to build pipelines that may have customized components that are not a part of Diffusers. If your pipeline has custom components that Diffusers doesn't already support, you need to provide their implementations as Python modules. These customized components could be a VAE, UNet, or scheduler. In most cases, the text encoder is imported from the Transformers library. The pipeline code itself can also be customized.
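As a rough sketch of what this looks like in practice — the module name `my_unet.py`, the class `MyUNet2DConditionModel`, and the local paths below are hypothetical placeholders, not part of Diffusers:

```py
import torch
from diffusers import DiffusionPipeline

# hypothetical custom component implemented in your own my_unet.py module
from my_unet import MyUNet2DConditionModel

# instantiate the custom component yourself...
unet = MyUNet2DConditionModel.from_pretrained("path/to/custom_unet", torch_dtype=torch.float16)

# ...and pass it in explicitly, together with a folder containing your custom pipeline.py
pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    unet=unet,
    custom_pipeline="path/to/pipeline_directory",
    torch_dtype=torch.float16,
).to("cuda")
```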
# Distilled Stable Diffusion inference

[[open-in-colab]]

Stable Diffusion inference can be a computationally intensive process because it must iteratively denoise the latents to generate an image. To reduce the computational burden, you can use a *distilled* version of the Stable Diffusion model from [Nota AI](https://huggingface.co/nota-ai). The distilled version of their Stable Diffusion model eliminates some of the residual and attention blocks from the UNet, reducing the model size by 51% and improving latency on CPU/GPU by 43%.

<Tip>

Read this [blog post](https://huggingface.co/blog/sd_distillation) to learn more about how knowledge distillation training works to produce a faster, smaller, and cheaper generative model.

</Tip>

Let's load the distilled Stable Diffusion model and compare it against the original Stable Diffusion model:

```py
from diffusers import StableDiffusionPipeline
import torch

distilled = StableDiffusionPipeline.from_pretrained(
    "nota-ai/bk-sdm-small", torch_dtype=torch.float16, use_safetensors=True,
).to("cuda")

original = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, use_safetensors=True,
).to("cuda")
```

Given a prompt, get the inference time for the original model:

```py
import time

seed = 2023
generator = torch.manual_seed(seed)

NUM_ITERS_TO_RUN = 3
NUM_INFERENCE_STEPS = 25
NUM_IMAGES_PER_PROMPT = 4

prompt = "a golden vase with different flowers"

start = time.time_ns()
for _ in range(NUM_ITERS_TO_RUN):
    images = original(
        prompt,
        num_inference_steps=NUM_INFERENCE_STEPS,
        generator=generator,
        num_images_per_prompt=NUM_IMAGES_PER_PROMPT
    ).images
end = time.time_ns()
original_sd = f"{(end - start) / 1e6:.1f}"

print(f"Execution time -- {original_sd} ms\n")
"Execution time -- 45781.5 ms"
```

Time the distilled model inference:

```py
start = time.time_ns()
for _ in range(NUM_ITERS_TO_RUN):
    images = distilled(
        prompt,
        num_inference_steps=NUM_INFERENCE_STEPS,
        generator=generator,
        num_images_per_prompt=NUM_IMAGES_PER_PROMPT
    ).images
end = time.time_ns()

distilled_sd = f"{(end - start) / 1e6:.1f}"
print(f"Execution time -- {distilled_sd} ms\n")
"Execution time -- 29884.2 ms"
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/original_sd.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">original Stable Diffusion (45781.5 ms)</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/distilled_sd.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">distilled Stable Diffusion (29884.2 ms)</figcaption>
  </div>
</div>

## Tiny AutoEncoder

To speed inference up even more, use a tiny distilled version of the [Stable Diffusion VAE](https://huggingface.co/sayakpaul/taesdxl-diffusers) to denoise the latents into images. Replace the VAE in the distilled Stable Diffusion model with the tiny VAE:

```py
from diffusers import AutoencoderTiny

distilled.vae = AutoencoderTiny.from_pretrained(
    "sayakpaul/taesd-diffusers", torch_dtype=torch.float16, use_safetensors=True,
).to("cuda")
```

Time the distilled model and distilled VAE inference:

```py
start = time.time_ns()
for _ in range(NUM_ITERS_TO_RUN):
    images = distilled(
        prompt,
        num_inference_steps=NUM_INFERENCE_STEPS,
        generator=generator,
        num_images_per_prompt=NUM_IMAGES_PER_PROMPT
    ).images
end = time.time_ns()

distilled_tiny_sd = f"{(end - start) / 1e6:.1f}"
print(f"Execution time -- {distilled_tiny_sd} ms\n")
"Execution time -- 27165.7 ms"
```

<div class="flex justify-center">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/distilled_sd_vae.png" />
    <figcaption class="mt-2 text-center text-sm text-gray-500">distilled Stable Diffusion + Tiny AutoEncoder (27165.7 ms)</figcaption>
  </div>
</div>
# Improve generation quality with FreeU

[[open-in-colab]]

The UNet is responsible for denoising during the reverse diffusion process, and there are two distinct features in its architecture:

1. Backbone features primarily contribute to the denoising process
2. Skip features mainly introduce high-frequency features into the decoder module and can make the network overlook the semantics in the backbone features

However, the skip connection can sometimes introduce unnatural image details. [FreeU](https://hf.co/papers/2309.11497) is a technique for improving image quality by rebalancing the contributions from the UNet's skip connections and backbone feature maps.

FreeU is applied during inference and does not require any additional training. The technique works for different tasks such as text-to-image, image-to-image, and text-to-video.

In this guide, you will apply FreeU to the [`StableDiffusionPipeline`], [`StableDiffusionXLPipeline`], and [`TextToVideoSDPipeline`]. You need to install Diffusers from source to run the examples below.

## StableDiffusionPipeline

Load the pipeline:

```py
from diffusers import DiffusionPipeline
import torch

pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
```

Then enable the FreeU mechanism with the FreeU-specific hyperparameters. These values are scaling factors for the backbone and skip features.

```py
pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
```

The values above are from the official FreeU [code repository](https://github.com/ChenyangSi/FreeU) where you can also find [reference hyperparameters](https://github.com/ChenyangSi/FreeU#range-for-more-parameters) for different models.

<Tip>

Disable the FreeU mechanism by calling `disable_freeu()` on a pipeline.

</Tip>

And then run inference:

```py
prompt = "A squirrel eating a burger"
seed = 2023
image = pipeline(prompt, generator=torch.manual_seed(seed)).images[0]
image
```

The figure below compares the results without and with FreeU for the same `prompt` and `seed` used above:



Let's see how Stable Diffusion 2 results are impacted:

```py
from diffusers import DiffusionPipeline
import torch

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16, safety_checker=None
).to("cuda")

prompt = "A squirrel eating a burger"
seed = 2023

pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.1, b2=1.2)
image = pipeline(prompt, generator=torch.manual_seed(seed)).images[0]
image
```



## Stable Diffusion XL

Finally, let's take a look at how FreeU affects Stable Diffusion XL results:

```py
from diffusers import DiffusionPipeline
import torch

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16,
).to("cuda")

prompt = "A squirrel eating a burger"
seed = 2023

# Comes from
# https://wandb.ai/nasirk24/UNET-FreeU-SDXL/reports/FreeU-SDXL-Optimal-Parameters--Vmlldzo1NDg4NTUw
pipeline.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
image = pipeline(prompt, generator=torch.manual_seed(seed)).images[0]
image
```



## Text-to-video generation

FreeU can also be used to improve video quality:

```python
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video
import torch

model_id = "cerspense/zeroscope_v2_576w"
pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "an astronaut riding a horse on mars"
seed = 2023

# The values come from
# https://github.com/lyn-rgb/FreeU_Diffusers#video-pipelines
pipe.enable_freeu(b1=1.2, b2=1.4, s1=0.9, s2=0.2)
video_frames = pipe(prompt, height=320, width=576, num_frames=30, generator=torch.manual_seed(seed)).frames[0]
export_to_video(video_frames, "astronaut_rides_horse.mp4")
```

Thanks to [kadirnar](https://github.com/kadirnar/) for helping to integrate the feature, and to [justindujardin](https://github.com/justindujardin) for the helpful discussions.
# Controlling image quality

The components of a diffusion model, like the UNet and scheduler, can be optimized to improve the quality of generated images, leading to better details. These techniques are especially useful if you don't have the resources to simply use a larger model for inference. You can enable these techniques during inference without any additional training.

This guide will show you how to turn these techniques on in your pipeline and how to configure them to improve the quality of your generated images.

## Details

[FreeU](https://hf.co/papers/2309.11497) improves image details by rebalancing the UNet's backbone and skip connection weights. The skip connections can cause the model to overlook some of the backbone semantics, which may lead to unnatural image details in the generated image. This technique does not require any additional training and can be applied on the fly during inference for tasks like image-to-image and text-to-video.

Use the [`~pipelines.StableDiffusionMixin.enable_freeu`] method on your pipeline and configure the scaling factors for the backbone (`b1` and `b2`) and skip connections (`s1` and `s2`). The number after each scaling factor corresponds to the stage in the UNet where the factor is applied. Take a look at the [FreeU](https://github.com/ChenyangSi/FreeU#parameters) repository for reference hyperparameters for different models.

<hfoptions id="freeu">
<hfoption id="Stable Diffusion v1-5">

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.5, b2=1.6)
generator = torch.Generator(device="cpu").manual_seed(33)
prompt = ""
image = pipeline(prompt, generator=generator).images[0]
image
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdv15-no-freeu.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU disabled</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdv15-freeu.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU enabled</figcaption>
  </div>
</div>

</hfoption>
<hfoption id="Stable Diffusion v2-1">

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.4, b2=1.6)
generator = torch.Generator(device="cpu").manual_seed(80)
prompt = "A squirrel eating a burger"
image = pipeline(prompt, generator=generator).images[0]
image
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdv21-no-freeu.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU disabled</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdv21-freeu.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU enabled</figcaption>
  </div>
</div>

</hfoption>
<hfoption id="Stable Diffusion XL">

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16,
).to("cuda")
pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
generator = torch.Generator(device="cpu").manual_seed(13)
prompt = "A squirrel eating a burger"
image = pipeline(prompt, generator=generator).images[0]
image
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-no-freeu.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU disabled</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-freeu.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU enabled</figcaption>
  </div>
</div>

</hfoption>
<hfoption id="Zeroscope">

```py
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

pipeline = DiffusionPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
).to("cuda")
# values come from https://github.com/lyn-rgb/FreeU_Diffusers#video-pipelines
pipeline.enable_freeu(b1=1.2, b2=1.4, s1=0.9, s2=0.2)
prompt = "Confident teddy bear surfer rides the wave in the tropics"
generator = torch.Generator(device="cpu").manual_seed(47)
video_frames = pipeline(prompt, generator=generator).frames[0]
export_to_video(video_frames, "teddy_bear.mp4", fps=10)
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/video-no-freeu.gif"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU disabled</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/video-freeu.gif"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU enabled</figcaption>
  </div>
</div>

</hfoption>
</hfoptions>

Call the [`~pipelines.StableDiffusionMixin.disable_freeu`] method to disable FreeU.

```py
pipeline.disable_freeu()
```
# Latent Consistency Model

[[open-in-colab]]

[Latent Consistency Models (LCMs)](https://hf.co/papers/2310.04378) enable fast high-quality image generation by directly predicting the reverse diffusion process in latent rather than pixel space. In other words, LCMs try to predict the noiseless image from the noisy image, in contrast to typical diffusion models that iteratively remove noise from the noisy image. By avoiding the iterative sampling process, LCMs are able to generate high-quality images in 2-4 steps instead of 20-30 steps.

LCMs are distilled from pretrained models, which requires ~32 hours of A100 compute. To speed this up, [LCM-LoRAs](https://hf.co/papers/2311.05556) train a [LoRA adapter](https://huggingface.co/docs/peft/conceptual_guides/adapter#low-rank-adaptation-lora), which has far fewer parameters to train compared to the full model. The LCM-LoRA can be plugged into a diffusion model once it has been trained.

This guide will show you how to use LCMs and LCM-LoRAs for fast inference on tasks and how to use them with other adapters like ControlNet or T2I-Adapter.

> [!TIP]
> LCMs and LCM-LoRAs are available for Stable Diffusion v1.5, Stable Diffusion XL, and the SSD-1B model. You can find their checkpoints on the [Latent Consistency](https://hf.co/collections/latent-consistency/latent-consistency-models-weights-654ce61a95edd6dffccef6a8) Collections.
## Text-to-image

<hfoptions id="lcm-text2img">
<hfoption id="LCM">

To use LCMs, you need to load the LCM checkpoint for your supported model into [`UNet2DConditionModel`] and replace the scheduler with the [`LCMScheduler`]. Then you can use the pipeline as usual, and pass a text prompt to generate an image in just 4 steps.

A couple of notes to keep in mind when using LCMs are:

* Typically, batch size is doubled inside the pipeline for classifier-free guidance. But LCM applies guidance with guidance embeddings and doesn't need to double the batch size, which leads to faster inference. The downside is that negative prompts don't work with LCM because they don't have any effect on the denoising process.
* The ideal range for `guidance_scale` is [3., 13.] because that is what the UNet was trained with. However, disabling `guidance_scale` with a value of 1.0 is also effective in most cases.

```python
import torch
from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, LCMScheduler

# the UNet checkpoint id below is assumed (the LCM-distilled SDXL UNet); it was elided in the source diff
unet = UNet2DConditionModel.from_pretrained(
    "latent-consistency/lcm-sdxl",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", unet=unet, torch_dtype=torch.float16, variant="fp16",
).to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"

generator = torch.manual_seed(0)
image = pipe(
    prompt=prompt, num_inference_steps=4, generator=generator, guidance_scale=8.0
).images[0]
image
```

<div class="flex justify-center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_full_sdxl_t2i.png"/>
</div>

</hfoption>
<hfoption id="LCM-LoRA">

To use LCM-LoRAs, you need to replace the scheduler with the [`LCMScheduler`] and load the LCM-LoRA weights with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method. Then you can use the pipeline as usual, and pass a text prompt to generate an image in just 4 steps.

A couple of notes to keep in mind when using LCM-LoRAs are:

* Typically, batch size is doubled inside the pipeline for classifier-free guidance. But LCM applies guidance with guidance embeddings and doesn't need to double the batch size, which leads to faster inference. The downside is that negative prompts don't work with LCM because they don't have any effect on the denoising process.
* You could use guidance with LCM-LoRAs, but it is very sensitive to high `guidance_scale` values and can lead to artifacts in the generated image. The best values we've found are between [1.0, 2.0].
* Replace [stabilityai/stable-diffusion-xl-base-1.0](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0) with any finetuned model. For example, try using the [animagine-xl](https://huggingface.co/Linaqruf/animagine-xl) checkpoint to generate anime images with SDXL.

```py
import torch
from diffusers import DiffusionPipeline, LCMScheduler

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    variant="fp16",
    torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")

prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"
generator = torch.manual_seed(42)
image = pipe(
    prompt=prompt, num_inference_steps=4, generator=generator, guidance_scale=1.0
).images[0]
image
```

<div class="flex justify-center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdxl_t2i.png"/>
</div>

</hfoption>
</hfoptions>
## Image-to-image

<hfoptions id="lcm-img2img">
<hfoption id="LCM">

To use LCMs for image-to-image, you need to load the LCM checkpoint for your supported model into [`UNet2DConditionModel`] and replace the scheduler with the [`LCMScheduler`]. Then you can use the pipeline as usual, and pass a text prompt and initial image to generate an image in just 4 steps.

> [!TIP]
> Experiment with different values for `num_inference_steps`, `strength`, and `guidance_scale` to get the best results.

```python
import torch
from diffusers import AutoPipelineForImage2Image, UNet2DConditionModel, LCMScheduler
from diffusers.utils import load_image

unet = UNet2DConditionModel.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",
    subfolder="unet",          # assumed: load only the UNet from the LCM checkpoint (elided in the source diff)
    torch_dtype=torch.float16,
)
pipe = AutoPipelineForImage2Image.from_pretrained(
    "Lykon/dreamshaper-7",     # assumed base checkpoint for the elided lines
    unet=unet,
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png")
prompt = "Astronauts in a jungle, cold color palette, muted colors, detailed, 8k"

generator = torch.manual_seed(0)
image = pipe(
    prompt,
    image=init_image,
    num_inference_steps=4,
    guidance_scale=7.5,        # assumed value for a line elided in the source diff
    strength=0.5,
    generator=generator
).images[0]
image
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm-img2img.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
  </div>
</div>

</hfoption>
<hfoption id="LCM-LoRA">

To use LCM-LoRAs for image-to-image, you need to replace the scheduler with the [`LCMScheduler`] and load the LCM-LoRA weights with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method. Then you can use the pipeline as usual, and pass a text prompt and initial image to generate an image in just 4 steps.

> [!TIP]
> Experiment with different values for `num_inference_steps`, `strength`, and `guidance_scale` to get the best results.

```py
import torch
from diffusers import AutoPipelineForImage2Image, LCMScheduler
from diffusers.utils import make_image_grid, load_image

pipe = AutoPipelineForImage2Image.from_pretrained(
    "Lykon/dreamshaper-7",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")

init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png")
prompt = "Astronauts in a jungle, cold color palette, muted colors, detailed, 8k"

generator = torch.manual_seed(0)
image = pipe(
    prompt,
    image=init_image,
    num_inference_steps=4,
    guidance_scale=1,
    strength=0.6,
    generator=generator
).images[0]
image
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm-lora-img2img.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
  </div>
</div>

</hfoption>
</hfoptions>
## Inpainting

To use LCM-LoRAs for inpainting, you need to replace the scheduler with the [`LCMScheduler`] and load the LCM-LoRA weights with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method. Then you can use the pipeline as usual, and pass a text prompt, initial image, and mask image to generate an image in just 4 steps.

```py
import torch
from diffusers import AutoPipelineForInpainting, LCMScheduler
from diffusers.utils import load_image, make_image_grid

pipe = AutoPipelineForInpainting.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")

init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png")
mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png")

prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k"
generator = torch.manual_seed(0)
image = pipe(
    prompt=prompt,
    image=init_image,
    mask_image=mask_image,
    generator=generator,
    num_inference_steps=4,
    guidance_scale=4,
).images[0]
image
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm-lora-inpaint.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
  </div>
</div>

## Adapters

LCMs are compatible with adapters like LoRA, ControlNet, T2I-Adapter, and AnimateDiff. You can bring the speed of LCMs to these adapters to generate images in a certain style or condition the model on another input like a canny image.
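For example, here is a minimal sketch of pairing AnimateDiff with an LCM-LoRA; the motion adapter and base checkpoint ids, as well as the scheduler settings, are assumptions picked for illustration rather than tuned recommendations:

```py
import torch
from diffusers import AnimateDiffPipeline, LCMScheduler, MotionAdapter
from diffusers.utils import export_to_gif

# assumed checkpoints: a Stable Diffusion v1.5 finetune and a v1.5 motion adapter
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
pipe = AnimateDiffPipeline.from_pretrained(
    "emilianJR/epiCRealism", motion_adapter=adapter, torch_dtype=torch.float16
).to("cuda")

# same recipe as for images: swap in the LCMScheduler and load the LCM-LoRA
# (a linear beta schedule is assumed here for the SD v1.5 motion module)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear")
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")

prompt = "a dog running through a field of flowers, golden hour lighting"
generator = torch.manual_seed(0)
frames = pipe(
    prompt,
    num_frames=16,
    num_inference_steps=6,
    guidance_scale=1.5,
    generator=generator,
).frames[0]
export_to_gif(frames, "animation.gif")
```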
### LoRA

[LoRA](../using-diffusers/loading_adapters#lora) adapters can be rapidly finetuned to learn a new style from just a few images and plugged into a pretrained model to generate images in that style.

<hfoptions id="lcm-lora">
<hfoption id="LCM">

Load the LCM checkpoint for your supported model into [`UNet2DConditionModel`] and replace the scheduler with the [`LCMScheduler`]. Then you can use the [`~loaders.LoraLoaderMixin.load_lora_weights`] method to load the LoRA weights into the LCM and generate a styled image in a few steps.

```python
import torch
from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, LCMScheduler

# the UNet checkpoint id below is assumed (the LCM-distilled SDXL UNet); it was elided in the source diff
unet = UNet2DConditionModel.from_pretrained(
    "latent-consistency/lcm-sdxl",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", unet=unet, torch_dtype=torch.float16, variant="fp16",
).to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

pipe.load_lora_weights("TheLastBen/Papercut_SDXL", weight_name="papercut.safetensors", adapter_name="papercut")

prompt = "papercut, a cute fox"

generator = torch.manual_seed(0)
image = pipe(
    prompt=prompt, num_inference_steps=4, generator=generator, guidance_scale=8.0
).images[0]
image
```

<div class="flex justify-center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_full_sdx_lora_mix.png"/>
</div>

</hfoption>
<hfoption id="LCM-LoRA">

Replace the scheduler with the [`LCMScheduler`]. Then you can use the [`~loaders.LoraLoaderMixin.load_lora_weights`] method to load the LCM-LoRA weights and the style LoRA you want to use. Combine both LoRA adapters with the [`~loaders.UNet2DConditionLoadersMixin.set_adapters`] method and generate a styled image in a few steps.

```py
import torch
from diffusers import DiffusionPipeline, LCMScheduler

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    variant="fp16",
    torch_dtype=torch.float16
).to("cuda")

pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl", adapter_name="lcm")
pipe.load_lora_weights("TheLastBen/Papercut_SDXL", weight_name="papercut.safetensors", adapter_name="papercut")

pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.8])

prompt = "papercut, a cute fox"
generator = torch.manual_seed(0)
image = pipe(prompt, num_inference_steps=4, guidance_scale=1, generator=generator).images[0]
image
```

<div class="flex justify-center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdx_lora_mix.png"/>
</div>

</hfoption>
</hfoptions>

### ControlNet

[ControlNet](./controlnet) adapters can be trained on a variety of inputs like canny edge, pose estimation, or depth. The ControlNet can be inserted into the pipeline to provide additional conditioning and control to the model for more accurate generation.

You can find additional ControlNet models trained on other inputs in [lllyasviel's](https://hf.co/lllyasviel) repository.

<hfoptions id="lcm-controlnet">
<hfoption id="LCM">

Load a ControlNet model trained on canny images and pass it to the [`ControlNetModel`]. Then you can load an LCM model into [`StableDiffusionControlNetPipeline`] and replace the scheduler with the [`LCMScheduler`]. Now pass the canny image to the pipeline and generate an image.

> [!TIP]
> Experiment with different values for `num_inference_steps`, `controlnet_conditioning_scale`, `cross_attention_kwargs`, and `guidance_scale` to get the best results.

```python
import torch
import cv2
import numpy as np
from PIL import Image

from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, LCMScheduler
from diffusers.utils import load_image, make_image_grid

# the canny preprocessing and the LCM checkpoint id below are assumed reconstructions of lines elided
# in the source diff; they mirror the parallel LCM-LoRA example that follows
image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
).resize((512, 512))

image = np.array(image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image)

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    safety_checker=None,
).to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

generator = torch.manual_seed(0)
image = pipe(
    "the mona lisa",               # prompt and call arguments assumed from the parallel example below
    image=canny_image,
    num_inference_steps=4,
    generator=generator,
).images[0]
make_image_grid([canny_image, image], rows=1, cols=2)
```

<div class="flex justify-center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_full_sdv1-5_controlnet.png"/>
</div>

</hfoption>
<hfoption id="LCM-LoRA">

Load a ControlNet model trained on canny images and pass it to the [`ControlNetModel`]. Then you can load a Stable Diffusion v1.5 model into [`StableDiffusionControlNetPipeline`] and replace the scheduler with the [`LCMScheduler`]. Use the [`~loaders.LoraLoaderMixin.load_lora_weights`] method to load the LCM-LoRA weights, and pass the canny image to the pipeline and generate an image.

> [!TIP]
> Experiment with different values for `num_inference_steps`, `controlnet_conditioning_scale`, `cross_attention_kwargs`, and `guidance_scale` to get the best results.

```py
import torch
import cv2
import numpy as np
from PIL import Image

from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, LCMScheduler
from diffusers.utils import load_image

image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
).resize((512, 512))

image = np.array(image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image)

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    safety_checker=None,
    variant="fp16"
).to("cuda")

pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")

generator = torch.manual_seed(0)
image = pipe(
    "the mona lisa",
    image=canny_image,
    num_inference_steps=4,
    guidance_scale=1.5,
    controlnet_conditioning_scale=0.8,
    cross_attention_kwargs={"scale": 1},
    generator=generator,
).images[0]
image
```

<div class="flex justify-center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdv1-5_controlnet.png"/>
</div>

</hfoption>
</hfoptions>
### T2I-Adapter

[T2I-Adapter](./t2i_adapter) is an even more lightweight adapter than ControlNet that provides an additional input to condition a pretrained model with. It is faster than ControlNet, but the results may be slightly worse.

You can find additional T2I-Adapter checkpoints trained on other inputs in [TencentArc's](https://hf.co/TencentARC) repository.

<hfoptions id="lcm-t2i">
<hfoption id="LCM">

Load a T2IAdapter trained on canny images and pass it to the [`StableDiffusionXLAdapterPipeline`]. Then load an LCM checkpoint into [`UNet2DConditionModel`] and replace the scheduler with the [`LCMScheduler`]. Now pass the canny image to the pipeline and generate an image.

```python
import torch
@@ -220,10 +470,9 @@ from PIL import Image
from diffusers import StableDiffusionXLAdapterPipeline, UNet2DConditionModel, T2IAdapter, LCMScheduler
from diffusers.utils import load_image, make_image_grid

# Detect the canny map in low resolution to avoid high-frequency details
image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
).resize((384, 384))

image = np.array(image)
@@ -236,7 +485,6 @@ image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image).resize((1024, 1216))

adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16, variant="fp16").to("cuda")

unet = UNet2DConditionModel.from_pretrained(
@@ -254,7 +502,7 @@ pipe = StableDiffusionXLAdapterPipeline.from_pretrained(

pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

prompt = "the mona lisa, 4k picture, high quality"
negative_prompt = "extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured"

generator = torch.manual_seed(0)
@@ -268,7 +516,116 @@ image = pipe(
    adapter_conditioning_factor=1,
    generator=generator,
).images[0]
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm-t2i.png"/>
</div>

</hfoption>
<hfoption id="LCM-LoRA">

Load a T2IAdapter trained on canny images and pass it to the [`StableDiffusionXLAdapterPipeline`]. Replace the scheduler with the [`LCMScheduler`], and use the [`~loaders.LoraLoaderMixin.load_lora_weights`] method to load the LCM-LoRA weights. Pass the canny image to the pipeline and generate an image.

```py
import torch
import cv2
import numpy as np
from PIL import Image

from diffusers import StableDiffusionXLAdapterPipeline, UNet2DConditionModel, T2IAdapter, LCMScheduler
from diffusers.utils import load_image, make_image_grid

# detect the canny map in low resolution to avoid high-frequency details
image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
).resize((384, 384))

image = np.array(image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image).resize((1024, 1024))

adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16, variant="fp16").to("cuda")

pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    adapter=adapter,
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")

prompt = "the mona lisa, 4k picture, high quality"
negative_prompt = "extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured"

generator = torch.manual_seed(0)
image = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    image=canny_image,
    num_inference_steps=4,
    guidance_scale=1.5,
    adapter_conditioning_scale=0.8,
    adapter_conditioning_factor=1,
    generator=generator,
).images[0]
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm-lora-t2i.png"/>
</div>

</hfoption>
</hfoptions>
### AnimateDiff

[AnimateDiff](../api/pipelines/animatediff) is an adapter that adds motion to an image. It can be used with most Stable Diffusion models, effectively turning them into "video generation" models. Generating good results with a video model usually requires generating multiple frames (16-24), which can be very slow with a regular Stable Diffusion model. LCM-LoRA can speed up this process by only taking 4-8 steps for each frame.

Load an [`AnimateDiffPipeline`] and pass a [`MotionAdapter`] to it. Then replace the scheduler with the [`LCMScheduler`], and combine both LoRA adapters with the [`~loaders.UNet2DConditionLoadersMixin.set_adapters`] method. Now you can pass a prompt to the pipeline and generate an animated image.

```py
import torch
from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler, LCMScheduler
from diffusers.utils import export_to_gif

adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5")
pipe = AnimateDiffPipeline.from_pretrained(
    "frankjoshua/toonyou_beta6",
    motion_adapter=adapter,
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LCM-LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5", adapter_name="lcm")
pipe.load_lora_weights("guoyww/animatediff-motion-lora-zoom-in", weight_name="diffusion_pytorch_model.safetensors", adapter_name="motion-lora")

pipe.set_adapters(["lcm", "motion-lora"], adapter_weights=[0.55, 1.2])

prompt = "best quality, masterpiece, 1girl, looking at viewer, blurry background, upper body, contemporary, dress"
generator = torch.manual_seed(0)
frames = pipe(
    prompt=prompt,
    num_inference_steps=5,
    guidance_scale=1.25,
    cross_attention_kwargs={"scale": 1},
    num_frames=24,
    generator=generator
).frames[0]
export_to_gif(frames, "animation.gif")
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm-lora-animatediff.gif"/>
</div>
|
||||||
|
|||||||
@@ -1,422 +0,0 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
|
||||||
the License. You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
|
||||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
|
||||||
specific language governing permissions and limitations under the License.
|
|
||||||
-->
|
|
||||||
|
|
||||||
[[open-in-colab]]
|
|
||||||
|
|
||||||
# Performing inference with LCM-LoRA
|
|
||||||
|
|
||||||
Latent Consistency Models (LCM) enable quality image generation in typically 2-4 steps making it possible to use diffusion models in almost real-time settings.
|
|
||||||
|
|
||||||
From the [official website](https://latent-consistency-models.github.io/):
|
|
||||||
|
|
||||||
> LCMs can be distilled from any pre-trained Stable Diffusion (SD) in only 4,000 training steps (~32 A100 GPU Hours) for generating high quality 768 x 768 resolution images in 2~4 steps or even one step, significantly accelerating text-to-image generation. We employ LCM to distill the Dreamshaper-V7 version of SD in just 4,000 training iterations.
|
|
||||||
|
|
||||||
For a more technical overview of LCMs, refer to [the paper](https://huggingface.co/papers/2310.04378).
|
|
||||||
|
|
||||||
However, each model needs to be distilled separately for latent consistency distillation. The core idea with LCM-LoRA is to train just a few adapter layers, the adapter being LoRA in this case.
|
|
||||||
This way, we don't have to train the full model and keep the number of trainable parameters manageable. The resulting LoRAs can then be applied to any fine-tuned version of the model without distilling them separately.
|
|
||||||
Additionally, the LoRAs can be applied to image-to-image, ControlNet/T2I-Adapter, inpainting, AnimateDiff etc.
|
|
||||||
The LCM-LoRA can also be combined with other LoRAs to generate styled images in very few steps (4-8).
|
|
||||||
|
|
||||||
LCM-LoRAs are available for [stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5), [stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0), and the [SSD-1B](https://huggingface.co/segmind/SSD-1B) model. All the checkpoints can be found in this [collection](https://huggingface.co/collections/latent-consistency/latent-consistency-models-loras-654cdd24e111e16f0865fba6).
|
|
||||||
|
|
||||||
For more details about LCM-LoRA, refer to [the technical report](https://huggingface.co/papers/2311.05556).
|
|
||||||
|
|
||||||
This guide shows how to perform inference with LCM-LoRAs for
|
|
||||||
- text-to-image
|
|
||||||
- image-to-image
|
|
||||||
- combined with styled LoRAs
|
|
||||||
- ControlNet/T2I-Adapter
|
|
||||||
- inpainting
|
|
||||||
- AnimateDiff
|
|
||||||
|
|
||||||
Before going through this guide, we'll take a look at the general workflow for performing inference with LCM-LoRAs.
|
|
||||||
LCM-LoRAs are similar to other Stable Diffusion LoRAs so they can be used with any [`DiffusionPipeline`] that supports LoRAs.
|
|
||||||
|
|
||||||
- Load the task specific pipeline and model.
|
|
||||||
- Set the scheduler to [`LCMScheduler`].
|
|
||||||
- Load the LCM-LoRA weights for the model.
|
|
||||||
- Reduce the `guidance_scale` between `[1.0, 2.0]` and set the `num_inference_steps` between [4, 8].
|
|
||||||
- Perform inference with the pipeline with the usual parameters.
|
|
||||||
|
|
||||||
Let's look at how we can perform inference with LCM-LoRAs for different tasks.
|
|
||||||
|
|
||||||
First, make sure you have [peft](https://github.com/huggingface/peft) installed, for better LoRA support.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
pip install -U peft
|
|
||||||
```
|
|
||||||
|
|
||||||
## Text-to-image
|
|
||||||
|
|
||||||
You'll use the [`StableDiffusionXLPipeline`] with the scheduler: [`LCMScheduler`] and then load the LCM-LoRA. Together with the LCM-LoRA and the scheduler, the pipeline enables a fast inference workflow overcoming the slow iterative nature of diffusion models.
|
|
||||||
|
|
||||||
```python
|
|
||||||
import torch
|
|
||||||
from diffusers import DiffusionPipeline, LCMScheduler
|
|
||||||
|
|
||||||
pipe = DiffusionPipeline.from_pretrained(
|
|
||||||
"stabilityai/stable-diffusion-xl-base-1.0",
|
|
||||||
variant="fp16",
|
|
||||||
torch_dtype=torch.float16
|
|
||||||
).to("cuda")
|
|
||||||
|
|
||||||
# set scheduler
|
|
||||||
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
|
||||||
|
|
||||||
# load LCM-LoRA
|
|
||||||
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
|
|
||||||
|
|
||||||
prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"
|
|
||||||
|
|
||||||
generator = torch.manual_seed(42)
|
|
||||||
image = pipe(
|
|
||||||
prompt=prompt, num_inference_steps=4, generator=generator, guidance_scale=1.0
|
|
||||||
).images[0]
|
|
||||||
```
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
Notice that we use only 4 steps for generation which is way less than what's typically used for standard SDXL.
|
|
||||||
|
|
||||||
<Tip>
|
|
||||||
|
|
||||||
You may have noticed that we set `guidance_scale=1.0`, which disables classifier-free guidance. This is because the LCM-LoRA is trained with guidance, so the batch size does not have to be doubled in this case. This leads to faster inference, with the drawback that negative prompts don't have any effect on the denoising process.
|
|
||||||
|
|
||||||
You can also use guidance with LCM-LoRA, but due to the nature of the training the model is very sensitive to `guidance_scale` values, and high values can lead to artifacts in the generated images. In our experiments, we found that the best values are in the range of [1.0, 2.0].
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
### Inference with a fine-tuned model
|
|
||||||
|
|
||||||
As mentioned above, the LCM-LoRA can be applied to any fine-tuned version of the model without having to distill them separately. Let's look at how we can perform inference with a fine-tuned model. In this example, we'll use the [animagine-xl](https://huggingface.co/Linaqruf/animagine-xl) model, which is a fine-tuned version of the SDXL model for generating anime.
|
|
||||||
|
|
||||||
```python
|
|
||||||
from diffusers import DiffusionPipeline, LCMScheduler
|
|
||||||
|
|
||||||
pipe = DiffusionPipeline.from_pretrained(
|
|
||||||
"Linaqruf/animagine-xl",
|
|
||||||
variant="fp16",
|
|
||||||
torch_dtype=torch.float16
|
|
||||||
).to("cuda")
|
|
||||||
|
|
||||||
# set scheduler
|
|
||||||
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
|
||||||
|
|
||||||
# load LCM-LoRA
|
|
||||||
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
|
|
||||||
|
|
||||||
prompt = "face focus, cute, masterpiece, best quality, 1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck"
|
|
||||||
|
|
||||||
generator = torch.manual_seed(0)
|
|
||||||
image = pipe(
|
|
||||||
prompt=prompt, num_inference_steps=4, generator=generator, guidance_scale=1.0
|
|
||||||
).images[0]
|
|
||||||
```
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
|
|
||||||
## Image-to-image
|
|
||||||
|
|
||||||
LCM-LoRA can be applied to image-to-image tasks too. Let's look at how we can perform image-to-image generation with LCMs. For this example, we'll use the [dreamshaper-7](https://huggingface.co/Lykon/dreamshaper-7) model and the LCM-LoRA for `stable-diffusion-v1-5`.
|
|
||||||
|
|
||||||
```python
|
|
||||||
import torch
|
|
||||||
from diffusers import AutoPipelineForImage2Image, LCMScheduler
|
|
||||||
from diffusers.utils import make_image_grid, load_image
|
|
||||||
|
|
||||||
pipe = AutoPipelineForImage2Image.from_pretrained(
|
|
||||||
"Lykon/dreamshaper-7",
|
|
||||||
torch_dtype=torch.float16,
|
|
||||||
variant="fp16",
|
|
||||||
).to("cuda")
|
|
||||||
|
|
||||||
# set scheduler
|
|
||||||
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
|
||||||
|
|
||||||
# load LCM-LoRA
|
|
||||||
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
|
|
||||||
|
|
||||||
# prepare image
|
|
||||||
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"
|
|
||||||
init_image = load_image(url)
|
|
||||||
prompt = "Astronauts in a jungle, cold color palette, muted colors, detailed, 8k"
|
|
||||||
|
|
||||||
# pass prompt and image to pipeline
|
|
||||||
generator = torch.manual_seed(0)
|
|
||||||
image = pipe(
|
|
||||||
prompt,
|
|
||||||
image=init_image,
|
|
||||||
num_inference_steps=4,
|
|
||||||
guidance_scale=1,
|
|
||||||
strength=0.6,
|
|
||||||
generator=generator
|
|
||||||
).images[0]
|
|
||||||
make_image_grid([init_image, image], rows=1, cols=2)
|
|
||||||
```
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
|
|
||||||
<Tip>
|
|
||||||
|
|
||||||
You can get different results based on your prompt and the image you provide. To get the best results, we recommend trying different values for the `num_inference_steps`, `strength`, and `guidance_scale` parameters and choosing the best one.
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
|
|
||||||
## Combine with styled LoRAs
|
|
||||||
|
|
||||||
LCM-LoRA can be combined with other LoRAs to generate styled images in very few steps (4-8). In the following example, we'll use the LCM-LoRA with the [papercut LoRA](TheLastBen/Papercut_SDXL).
|
|
||||||
To learn more about how to combine LoRAs, refer to [this guide](https://huggingface.co/docs/diffusers/tutorials/using_peft_for_inference#combine-multiple-adapters).
|
|
||||||
|
|
||||||
```python
|
|
||||||
import torch
|
|
||||||
from diffusers import DiffusionPipeline, LCMScheduler
|
|
||||||
|
|
||||||
pipe = DiffusionPipeline.from_pretrained(
|
|
||||||
"stabilityai/stable-diffusion-xl-base-1.0",
|
|
||||||
variant="fp16",
|
|
||||||
torch_dtype=torch.float16
|
|
||||||
).to("cuda")
|
|
||||||
|
|
||||||
# set scheduler
|
|
||||||
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
|
||||||
|
|
||||||
# load LoRAs
|
|
||||||
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl", adapter_name="lcm")
|
|
||||||
pipe.load_lora_weights("TheLastBen/Papercut_SDXL", weight_name="papercut.safetensors", adapter_name="papercut")
|
|
||||||
|
|
||||||
# Combine LoRAs
|
|
||||||
pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.8])
|
|
||||||
|
|
||||||
prompt = "papercut, a cute fox"
|
|
||||||
generator = torch.manual_seed(0)
|
|
||||||
image = pipe(prompt, num_inference_steps=4, guidance_scale=1, generator=generator).images[0]
|
|
||||||
image
|
|
||||||
```
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
|
|
||||||
## ControlNet/T2I-Adapter
|
|
||||||
|
|
||||||
Let's look at how we can perform inference with ControlNet/T2I-Adapter and LCM-LoRA.
|
|
||||||
|
|
||||||
### ControlNet
|
|
||||||
For this example, we'll use the SD-v1-5 model and the LCM-LoRA for SD-v1-5 with canny ControlNet.
|
|
||||||
|
|
||||||
```python
|
|
||||||
import torch
|
|
||||||
import cv2
|
|
||||||
import numpy as np
|
|
||||||
from PIL import Image
|
|
||||||
|
|
||||||
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, LCMScheduler
|
|
||||||
from diffusers.utils import load_image
|
|
||||||
|
|
||||||
image = load_image(
|
|
||||||
"https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
|
|
||||||
).resize((512, 512))
|
|
||||||
|
|
||||||
image = np.array(image)
|
|
||||||
|
|
||||||
low_threshold = 100
|
|
||||||
high_threshold = 200
|
|
||||||
|
|
||||||
image = cv2.Canny(image, low_threshold, high_threshold)
|
|
||||||
image = image[:, :, None]
|
|
||||||
image = np.concatenate([image, image, image], axis=2)
|
|
||||||
canny_image = Image.fromarray(image)
|
|
||||||
|
|
||||||
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
|
|
||||||
pipe = StableDiffusionControlNetPipeline.from_pretrained(
|
|
||||||
"runwayml/stable-diffusion-v1-5",
|
|
||||||
controlnet=controlnet,
|
|
||||||
torch_dtype=torch.float16,
|
|
||||||
safety_checker=None,
|
|
||||||
variant="fp16"
|
|
||||||
).to("cuda")
|
|
||||||
|
|
||||||
# set scheduler
|
|
||||||
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
|
||||||
|
|
||||||
# load LCM-LoRA
|
|
||||||
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
|
|
||||||
|
|
||||||
generator = torch.manual_seed(0)
|
|
||||||
image = pipe(
|
|
||||||
"the mona lisa",
|
|
||||||
image=canny_image,
|
|
||||||
num_inference_steps=4,
|
|
||||||
guidance_scale=1.5,
|
|
||||||
controlnet_conditioning_scale=0.8,
|
|
||||||
cross_attention_kwargs={"scale": 1},
|
|
||||||
generator=generator,
|
|
||||||
).images[0]
|
|
||||||
make_image_grid([canny_image, image], rows=1, cols=2)
|
|
||||||
```
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
|
|
||||||
<Tip>
|
|
||||||
The inference parameters in this example might not work in all cases, so we recommend trying different values for the `num_inference_steps`, `guidance_scale`, `controlnet_conditioning_scale`, and `cross_attention_kwargs` parameters and choosing the best combination.
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
### T2I-Adapter
|
|
||||||
|
|
||||||
This example shows how to use the LCM-LoRA with the [Canny T2I-Adapter](TencentARC/t2i-adapter-canny-sdxl-1.0) and SDXL.
|
|
||||||
|
|
||||||
```python
|
|
||||||
import torch
|
|
||||||
import cv2
|
|
||||||
import numpy as np
|
|
||||||
from PIL import Image
|
|
||||||
|
|
||||||
from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter, LCMScheduler
|
|
||||||
from diffusers.utils import load_image, make_image_grid
|
|
||||||
|
|
||||||
# Prepare image
|
|
||||||
# Detect the canny map in low resolution to avoid high-frequency details
|
|
||||||
image = load_image(
|
|
||||||
"https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/org_canny.jpg"
|
|
||||||
).resize((384, 384))
|
|
||||||
|
|
||||||
image = np.array(image)
|
|
||||||
|
|
||||||
low_threshold = 100
|
|
||||||
high_threshold = 200
|
|
||||||
|
|
||||||
image = cv2.Canny(image, low_threshold, high_threshold)
|
|
||||||
image = image[:, :, None]
|
|
||||||
image = np.concatenate([image, image, image], axis=2)
|
|
||||||
canny_image = Image.fromarray(image).resize((1024, 1024))
|
|
||||||
|
|
||||||
# load adapter
|
|
||||||
adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16, variant="fp16").to("cuda")
|
|
||||||
|
|
||||||
pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
|
|
||||||
"stabilityai/stable-diffusion-xl-base-1.0",
|
|
||||||
adapter=adapter,
|
|
||||||
torch_dtype=torch.float16,
|
|
||||||
variant="fp16",
|
|
||||||
).to("cuda")
|
|
||||||
|
|
||||||
# set scheduler
|
|
||||||
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
|
||||||
|
|
||||||
# load LCM-LoRA
|
|
||||||
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
|
|
||||||
|
|
||||||
prompt = "Mystical fairy in real, magic, 4k picture, high quality"
|
|
||||||
negative_prompt = "extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured"
|
|
||||||
|
|
||||||
generator = torch.manual_seed(0)
|
|
||||||
image = pipe(
|
|
||||||
prompt=prompt,
|
|
||||||
negative_prompt=negative_prompt,
|
|
||||||
image=canny_image,
|
|
||||||
num_inference_steps=4,
|
|
||||||
guidance_scale=1.5,
|
|
||||||
adapter_conditioning_scale=0.8,
|
|
||||||
adapter_conditioning_factor=1,
|
|
||||||
generator=generator,
|
|
||||||
).images[0]
|
|
||||||
make_image_grid([canny_image, image], rows=1, cols=2)
|
|
||||||
```
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
|
|
||||||
## Inpainting
|
|
||||||
|
|
||||||
LCM-LoRA can be used for inpainting as well.
|
|
||||||
|
|
||||||
```python
|
|
||||||
import torch
|
|
||||||
from diffusers import AutoPipelineForInpainting, LCMScheduler
|
|
||||||
from diffusers.utils import load_image, make_image_grid
|
|
||||||
|
|
||||||
pipe = AutoPipelineForInpainting.from_pretrained(
|
|
||||||
"runwayml/stable-diffusion-inpainting",
|
|
||||||
torch_dtype=torch.float16,
|
|
||||||
variant="fp16",
|
|
||||||
).to("cuda")
|
|
||||||
|
|
||||||
# set scheduler
|
|
||||||
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
|
||||||
|
|
||||||
# load LCM-LoRA
|
|
||||||
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
|
|
||||||
|
|
||||||
# load base and mask image
|
|
||||||
init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png")
|
|
||||||
mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png")
|
|
||||||
|
|
||||||
# generator = torch.Generator("cuda").manual_seed(92)
|
|
||||||
prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k"
|
|
||||||
generator = torch.manual_seed(0)
|
|
||||||
image = pipe(
|
|
||||||
prompt=prompt,
|
|
||||||
image=init_image,
|
|
||||||
mask_image=mask_image,
|
|
||||||
generator=generator,
|
|
||||||
num_inference_steps=4,
|
|
||||||
guidance_scale=4,
|
|
||||||
).images[0]
|
|
||||||
make_image_grid([init_image, mask_image, image], rows=1, cols=3)
|
|
||||||
```
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
|
|
||||||
## AnimateDiff
|
|
||||||
|
|
||||||
[`AnimateDiff`] allows you to animate images using Stable Diffusion models. To get good results, we need to generate multiple frames (16-24), and doing this with standard SD models can be very slow.
|
|
||||||
LCM-LoRA can be used to speed up the process significantly, as you just need to do 4-8 steps for each frame. Let's look at how we can perform animation with LCM-LoRA and AnimateDiff.
|
|
||||||
|
|
||||||
```python
|
|
||||||
import torch
|
|
||||||
from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler, LCMScheduler
|
|
||||||
from diffusers.utils import export_to_gif
|
|
||||||
|
|
||||||
adapter = MotionAdapter.from_pretrained("diffusers/animatediff-motion-adapter-v1-5")
|
|
||||||
pipe = AnimateDiffPipeline.from_pretrained(
|
|
||||||
"frankjoshua/toonyou_beta6",
|
|
||||||
motion_adapter=adapter,
|
|
||||||
).to("cuda")
|
|
||||||
|
|
||||||
# set scheduler
|
|
||||||
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
|
||||||
|
|
||||||
# load LCM-LoRA
|
|
||||||
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5", adapter_name="lcm")
|
|
||||||
pipe.load_lora_weights("guoyww/animatediff-motion-lora-zoom-in", weight_name="diffusion_pytorch_model.safetensors", adapter_name="motion-lora")
|
|
||||||
|
|
||||||
pipe.set_adapters(["lcm", "motion-lora"], adapter_weights=[0.55, 1.2])
|
|
||||||
|
|
||||||
prompt = "best quality, masterpiece, 1girl, looking at viewer, blurry background, upper body, contemporary, dress"
|
|
||||||
generator = torch.manual_seed(0)
|
|
||||||
frames = pipe(
|
|
||||||
prompt=prompt,
|
|
||||||
num_inference_steps=5,
|
|
||||||
guidance_scale=1.25,
|
|
||||||
cross_attention_kwargs={"scale": 1},
|
|
||||||
num_frames=24,
|
|
||||||
generator=generator
|
|
||||||
).frames[0]
|
|
||||||
export_to_gif(frames, "animation.gif")
|
|
||||||
```
|
|
||||||
|
|
||||||

|
|
||||||
@@ -249,13 +249,13 @@ controlnet = ControlNetModel.from_pretrained(
|
|||||||
controlnet_id,
|
controlnet_id,
|
||||||
torch_dtype=torch.float16,
|
torch_dtype=torch.float16,
|
||||||
variant="fp16",
|
variant="fp16",
|
||||||
).to(device)
|
)
|
||||||
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
|
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
|
||||||
base_model_id,
|
base_model_id,
|
||||||
controlnet=controlnet,
|
controlnet=controlnet,
|
||||||
torch_dtype=torch.float16,
|
torch_dtype=torch.float16,
|
||||||
variant="fp16",
|
variant="fp16",
|
||||||
).to(device)
|
)
|
||||||
pipe.enable_model_cpu_offload()
|
pipe.enable_model_cpu_offload()
|
||||||
|
|
||||||
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
|
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
|
||||||
@@ -301,13 +301,13 @@ controlnet = ControlNetModel.from_pretrained(
|
|||||||
controlnet_id,
|
controlnet_id,
|
||||||
torch_dtype=torch.float16,
|
torch_dtype=torch.float16,
|
||||||
variant="fp16",
|
variant="fp16",
|
||||||
).to(device)
|
)
|
||||||
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
|
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
|
||||||
base_model_id,
|
base_model_id,
|
||||||
controlnet=controlnet,
|
controlnet=controlnet,
|
||||||
torch_dtype=torch.float16,
|
torch_dtype=torch.float16,
|
||||||
variant="fp16",
|
variant="fp16",
|
||||||
).to(device)
|
)
|
||||||
pipe.enable_model_cpu_offload()
|
pipe.enable_model_cpu_offload()
|
||||||
|
|
||||||
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
|
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
|
||||||
|
|||||||
@@ -230,7 +230,7 @@ from diffusers.utils import load_image, make_image_grid
|
|||||||
|
|
||||||
pipeline = AutoPipelineForInpainting.from_pretrained(
|
pipeline = AutoPipelineForInpainting.from_pretrained(
|
||||||
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16"
|
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16"
|
||||||
).to("cuda")
|
)
|
||||||
pipeline.enable_model_cpu_offload()
|
pipeline.enable_model_cpu_offload()
|
||||||
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
|
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
|
||||||
pipeline.enable_xformers_memory_efficient_attention()
|
pipeline.enable_xformers_memory_efficient_attention()
|
||||||
@@ -255,7 +255,7 @@ from diffusers.utils import load_image, make_image_grid
|
|||||||
|
|
||||||
pipeline = AutoPipelineForInpainting.from_pretrained(
|
pipeline = AutoPipelineForInpainting.from_pretrained(
|
||||||
"runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16"
|
"runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16"
|
||||||
).to("cuda")
|
)
|
||||||
pipeline.enable_model_cpu_offload()
|
pipeline.enable_model_cpu_offload()
|
||||||
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
|
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
|
||||||
pipeline.enable_xformers_memory_efficient_attention()
|
pipeline.enable_xformers_memory_efficient_attention()
|
||||||
@@ -296,7 +296,7 @@ from diffusers.utils import load_image, make_image_grid
|
|||||||
|
|
||||||
pipeline = AutoPipelineForInpainting.from_pretrained(
|
pipeline = AutoPipelineForInpainting.from_pretrained(
|
||||||
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16"
|
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16"
|
||||||
).to("cuda")
|
)
|
||||||
pipeline.enable_model_cpu_offload()
|
pipeline.enable_model_cpu_offload()
|
||||||
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
|
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
|
||||||
pipeline.enable_xformers_memory_efficient_attention()
|
pipeline.enable_xformers_memory_efficient_attention()
|
||||||
@@ -319,7 +319,7 @@ from diffusers.utils import load_image, make_image_grid
|
|||||||
|
|
||||||
pipeline = AutoPipelineForInpainting.from_pretrained(
|
pipeline = AutoPipelineForInpainting.from_pretrained(
|
||||||
"runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16"
|
"runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16"
|
||||||
).to("cuda")
|
)
|
||||||
pipeline.enable_model_cpu_offload()
|
pipeline.enable_model_cpu_offload()
|
||||||
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
|
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
|
||||||
pipeline.enable_xformers_memory_efficient_attention()
|
pipeline.enable_xformers_memory_efficient_attention()
|
||||||
|
|||||||
@@ -277,7 +277,7 @@ images = pipeline(
|
|||||||
|
|
||||||
### IP-Adapter masking
|
### IP-Adapter masking
|
||||||
|
|
||||||
Binary masks specify which portion of the output image should be assigned to an IP-Adapter. This is useful for composing more than one IP-Adapter image. For each input IP-Adapter image, you must provide a binary mask an an IP-Adapter.
|
Binary masks specify which portion of the output image should be assigned to an IP-Adapter. This is useful for composing more than one IP-Adapter image. For each input IP-Adapter image, you must provide a binary mask.
|
||||||
|
|
||||||
To start, preprocess the input IP-Adapter images with the [`~image_processor.IPAdapterMaskProcessor.preprocess()`] to generate their masks. For optimal results, provide the output height and width to [`~image_processor.IPAdapterMaskProcessor.preprocess()`]. This ensures masks with different aspect ratios are appropriately stretched. If the input masks already match the aspect ratio of the generated image, you don't have to set the `height` and `width`.
|
To start, preprocess the input IP-Adapter images with the [`~image_processor.IPAdapterMaskProcessor.preprocess()`] to generate their masks. For optimal results, provide the output height and width to [`~image_processor.IPAdapterMaskProcessor.preprocess()`]. This ensures masks with different aspect ratios are appropriately stretched. If the input masks already match the aspect ratio of the generated image, you don't have to set the `height` and `width`.
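
A minimal sketch of that preprocessing step is shown below. The mask URLs and output size are illustrative placeholders; swap in your own binary masks and the resolution you plan to generate at.

```py
from diffusers.image_processor import IPAdapterMaskProcessor
from diffusers.utils import load_image

# example binary masks (white = region assigned to that IP-Adapter image); replace with your own
mask1 = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_mask_mask1.png")
mask2 = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_mask_mask2.png")

output_height = 1024
output_width = 1024

processor = IPAdapterMaskProcessor()
# stretches each mask to the output aspect ratio and stacks them into a single tensor
masks = processor.preprocess([mask1, mask2], height=output_height, width=output_width)
```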
|
||||||
|
|
||||||
@@ -305,13 +305,18 @@ masks = processor.preprocess([mask1, mask2], height=output_height, width=output_
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
When there is more than one input IP-Adapter image, load them as a list to ensure each image is assigned to a different IP-Adapter. Each of the input IP-Adapter images here correspond to the masks generated above.
|
When there is more than one input IP-Adapter image, load them as a list and provide the IP-Adapter scale list. Each of the input IP-Adapter images here corresponds to one of the masks generated above.
|
||||||
|
|
||||||
```py
|
```py
|
||||||
|
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name=["ip-adapter-plus-face_sdxl_vit-h.safetensors"])
|
||||||
|
pipeline.set_ip_adapter_scale([[0.7, 0.7]]) # one scale for each image-mask pair
|
||||||
|
|
||||||
face_image1 = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_mask_girl1.png")
|
face_image1 = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_mask_girl1.png")
|
||||||
face_image2 = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_mask_girl2.png")
|
face_image2 = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_mask_girl2.png")
|
||||||
|
|
||||||
ip_images = [[face_image1], [face_image2]]
|
ip_images = [[face_image1, face_image2]]
|
||||||
|
|
||||||
|
masks = [masks.reshape(1, masks.shape[0], masks.shape[2], masks.shape[3])]
|
||||||
```
|
```
|
||||||
|
|
||||||
<div class="flex flex-row gap-4">
|
<div class="flex flex-row gap-4">
|
||||||
@@ -328,8 +333,6 @@ ip_images = [[face_image1], [face_image2]]
|
|||||||
Now pass the preprocessed masks to `cross_attention_kwargs` in the pipeline call.
|
Now pass the preprocessed masks to `cross_attention_kwargs` in the pipeline call.
|
||||||
|
|
||||||
```py
|
```py
|
||||||
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name=["ip-adapter-plus-face_sdxl_vit-h.safetensors"] * 2)
|
|
||||||
pipeline.set_ip_adapter_scale([0.7] * 2)
|
|
||||||
generator = torch.Generator(device="cpu").manual_seed(0)
|
generator = torch.Generator(device="cpu").manual_seed(0)
|
||||||
num_images = 1
|
num_images = 1
|
||||||
|
|
||||||
@@ -436,7 +439,7 @@ image = torch.from_numpy(faces[0].normed_embedding)
|
|||||||
ref_images_embeds.append(image.unsqueeze(0))
|
ref_images_embeds.append(image.unsqueeze(0))
|
||||||
ref_images_embeds = torch.stack(ref_images_embeds, dim=0).unsqueeze(0)
|
ref_images_embeds = torch.stack(ref_images_embeds, dim=0).unsqueeze(0)
|
||||||
neg_ref_images_embeds = torch.zeros_like(ref_images_embeds)
|
neg_ref_images_embeds = torch.zeros_like(ref_images_embeds)
|
||||||
id_embeds = torch.cat([neg_ref_images_embeds, ref_images_embeds]).to(dtype=torch.float16, device="cuda"))
|
id_embeds = torch.cat([neg_ref_images_embeds, ref_images_embeds]).to(dtype=torch.float16, device="cuda")
|
||||||
|
|
||||||
generator = torch.Generator(device="cpu").manual_seed(42)
|
generator = torch.Generator(device="cpu").manual_seed(42)
|
||||||
|
|
||||||
@@ -452,13 +455,28 @@ images = pipeline(
|
|||||||
Both IP-Adapter FaceID Plus and Plus v2 models require CLIP image embeddings. You can prepare face embeddings as shown previously, then you can extract and pass CLIP embeddings to the hidden image projection layers.
|
Both IP-Adapter FaceID Plus and Plus v2 models require CLIP image embeddings. You can prepare face embeddings as shown previously, then you can extract and pass CLIP embeddings to the hidden image projection layers.
|
||||||
|
|
||||||
```py
|
```py
|
||||||
clip_embeds = pipeline.prepare_ip_adapter_image_embeds([ip_adapter_images], None, torch.device("cuda"), num_images, True)[0]
|
from insightface.utils import face_align
|
||||||
|
|
||||||
|
ref_images_embeds = []
|
||||||
|
ip_adapter_images = []
|
||||||
|
app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
|
||||||
|
app.prepare(ctx_id=0, det_size=(640, 640))
|
||||||
|
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_BGR2RGB)
|
||||||
|
faces = app.get(image)
|
||||||
|
ip_adapter_images.append(face_align.norm_crop(image, landmark=faces[0].kps, image_size=224))
|
||||||
|
image = torch.from_numpy(faces[0].normed_embedding)
|
||||||
|
ref_images_embeds.append(image.unsqueeze(0))
|
||||||
|
ref_images_embeds = torch.stack(ref_images_embeds, dim=0).unsqueeze(0)
|
||||||
|
neg_ref_images_embeds = torch.zeros_like(ref_images_embeds)
|
||||||
|
id_embeds = torch.cat([neg_ref_images_embeds, ref_images_embeds]).to(dtype=torch.float16, device="cuda")
|
||||||
|
|
||||||
|
clip_embeds = pipeline.prepare_ip_adapter_image_embeds(
|
||||||
|
[ip_adapter_images], None, torch.device("cuda"), num_images, True)[0]
|
||||||
|
|
||||||
pipeline.unet.encoder_hid_proj.image_projection_layers[0].clip_embeds = clip_embeds.to(dtype=torch.float16)
|
pipeline.unet.encoder_hid_proj.image_projection_layers[0].clip_embeds = clip_embeds.to(dtype=torch.float16)
|
||||||
pipeline.unet.encoder_hid_proj.image_projection_layers[0].shortcut = False # True if Plus v2
|
pipeline.unet.encoder_hid_proj.image_projection_layers[0].shortcut = False # True if Plus v2
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
### Multi IP-Adapter
|
### Multi IP-Adapter
|
||||||
|
|
||||||
More than one IP-Adapter can be used at the same time to generate specific images in more diverse styles. For example, you can use IP-Adapter-Face to generate consistent faces and characters, and IP-Adapter Plus to generate those faces in a specific style.
|
More than one IP-Adapter can be used at the same time to generate specific images in more diverse styles. For example, you can use IP-Adapter-Face to generate consistent faces and characters, and IP-Adapter Plus to generate those faces in a specific style.
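
The full example isn't shown in this excerpt, but a minimal sketch of loading two IP-Adapters at once could look like this; the checkpoint file names and scale values are illustrative assumptions, not requirements.

```py
import torch
from transformers import CLIPVisionModelWithProjection
from diffusers import AutoPipelineForText2Image

# the ViT-H "plus" checkpoints need the matching CLIP image encoder
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "h94/IP-Adapter", subfolder="models/image_encoder", torch_dtype=torch.float16
)

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    image_encoder=image_encoder,
    torch_dtype=torch.float16,
).to("cuda")

# load two IP-Adapters at once: a style adapter and a face adapter
pipeline.load_ip_adapter(
    "h94/IP-Adapter",
    subfolder=["sdxl_models", "sdxl_models"],
    weight_name=["ip-adapter-plus_sdxl_vit-h.safetensors", "ip-adapter-plus-face_sdxl_vit-h.safetensors"],
)
# one scale per loaded adapter
pipeline.set_ip_adapter_scale([0.7, 0.3])
```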
|
||||||
@@ -640,3 +658,87 @@ image
|
|||||||
<div class="flex justify-center">
|
<div class="flex justify-center">
|
||||||
<img src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ipa-controlnet-out.png" />
|
<img src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ipa-controlnet-out.png" />
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
### Style & layout control
|
||||||
|
|
||||||
|
[InstantStyle](https://arxiv.org/abs/2404.02733) is a plug-and-play method on top of IP-Adapter that disentangles style and layout from the image prompt to control image generation. This way, you can generate images that follow only the style or layout of the image prompt, with significantly improved diversity. This is achieved by only activating the IP-Adapter in specific parts of the model.
|
||||||
|
|
||||||
|
By default, IP-Adapters are inserted into all layers of the model. Use the [`~loaders.IPAdapterMixin.set_ip_adapter_scale`] method with a dictionary to assign scales to the IP-Adapter at different layers.
|
||||||
|
|
||||||
|
```py
|
||||||
|
from diffusers import AutoPipelineForText2Image
|
||||||
|
from diffusers.utils import load_image
|
||||||
|
import torch
|
||||||
|
|
||||||
|
pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")
|
||||||
|
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
|
||||||
|
|
||||||
|
scale = {
|
||||||
|
"down": {"block_2": [0.0, 1.0]},
|
||||||
|
"up": {"block_0": [0.0, 1.0, 0.0]},
|
||||||
|
}
|
||||||
|
pipeline.set_ip_adapter_scale(scale)
|
||||||
|
```
|
||||||
|
|
||||||
|
This will activate the IP-Adapter at the second layer in the model's down-part block 2 and up-part block 0. The former is the layer where the IP-Adapter injects layout information and the latter injects style. By inserting the IP-Adapter into these two layers, you can generate images that follow both the style and layout of the image prompt, but with contents more aligned to the text prompt.
|
||||||
|
|
||||||
|
```py
|
||||||
|
style_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg")
|
||||||
|
|
||||||
|
generator = torch.Generator(device="cpu").manual_seed(26)
|
||||||
|
image = pipeline(
|
||||||
|
prompt="a cat, masterpiece, best quality, high quality",
|
||||||
|
ip_adapter_image=style_image,
|
||||||
|
negative_prompt="text, watermark, lowres, low quality, worst quality, deformed, glitch, low contrast, noisy, saturation, blurry",
|
||||||
|
guidance_scale=5,
|
||||||
|
num_inference_steps=30,
|
||||||
|
generator=generator,
|
||||||
|
).images[0]
|
||||||
|
image
|
||||||
|
```
|
||||||
|
|
||||||
|
<div class="flex flex-row gap-4">
|
||||||
|
<div class="flex-1">
|
||||||
|
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"/>
|
||||||
|
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter image</figcaption>
|
||||||
|
</div>
|
||||||
|
<div class="flex-1">
|
||||||
|
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png"/>
|
||||||
|
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
In contrast, inserting the IP-Adapter into all layers will often generate images that focus too heavily on the image prompt and diminish diversity.
|
||||||
|
|
||||||
|
Activate IP-Adapter only in the style layer and then call the pipeline again.
|
||||||
|
|
||||||
|
```py
|
||||||
|
scale = {
|
||||||
|
"up": {"block_0": [0.0, 1.0, 0.0]},
|
||||||
|
}
|
||||||
|
pipeline.set_ip_adapter_scale(scale)
|
||||||
|
|
||||||
|
generator = torch.Generator(device="cpu").manual_seed(26)
|
||||||
|
image = pipeline(
|
||||||
|
prompt="a cat, masterpiece, best quality, high quality",
|
||||||
|
ip_adapter_image=style_image,
|
||||||
|
negative_prompt="text, watermark, lowres, low quality, worst quality, deformed, glitch, low contrast, noisy, saturation, blurry",
|
||||||
|
guidance_scale=5,
|
||||||
|
num_inference_steps=30,
|
||||||
|
generator=generator,
|
||||||
|
).images[0]
|
||||||
|
image
|
||||||
|
```
|
||||||
|
|
||||||
|
<div class="flex flex-row gap-4">
|
||||||
|
<div class="flex-1">
|
||||||
|
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_only.png"/>
|
||||||
|
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter only in style layer</figcaption>
|
||||||
|
</div>
|
||||||
|
<div class="flex-1">
|
||||||
|
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_ip_adapter.png"/>
|
||||||
|
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter in all layers</figcaption>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
Note that you don't have to specify all layers in the dictionary. Layers not included in the dictionary are set to a scale of 0, which disables the IP-Adapter in those layers by default.
|
||||||
|
|||||||
@@ -154,7 +154,10 @@ When you load multiple pipelines that share the same model components, it makes
|
|||||||
1. You generated an image with the [`StableDiffusionPipeline`] but you want to improve its quality with the [`StableDiffusionSAGPipeline`]. Both of these pipelines share the same pretrained model, so it'd be a waste of memory to load the same model twice.
|
1. You generated an image with the [`StableDiffusionPipeline`] but you want to improve its quality with the [`StableDiffusionSAGPipeline`]. Both of these pipelines share the same pretrained model, so it'd be a waste of memory to load the same model twice.
|
||||||
2. You want to add a model component, like a [`MotionAdapter`](../api/pipelines/animatediff#animatediffpipeline), to [`AnimateDiffPipeline`] which was instantiated from an existing [`StableDiffusionPipeline`]. Again, both pipelines share the same pretrained model, so it'd be a waste of memory to load an entirely new pipeline again.
|
2. You want to add a model component, like a [`MotionAdapter`](../api/pipelines/animatediff#animatediffpipeline), to [`AnimateDiffPipeline`] which was instantiated from an existing [`StableDiffusionPipeline`]. Again, both pipelines share the same pretrained model, so it'd be a waste of memory to load an entirely new pipeline again.
|
||||||
|
|
||||||
With the [`DiffusionPipeline.from_pipe`] API, you can switch between multiple pipelines to take advantage of their different features without increasing memory usage. It is similar to turning a feature in your pipeline on and off.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> To switch between tasks (rather than features), use the [`~DiffusionPipeline.from_pipe`] method with the [AutoPipeline](../api/pipelines/auto_pipeline) class, which automatically identifies the pipeline class based on the task (learn more in the [AutoPipeline](../tutorials/autopipeline) tutorial).
|
||||||
|
|
||||||
Let's start with a [`StableDiffusionPipeline`] and then reuse the loaded model components to create a [`StableDiffusionSAGPipeline`] to increase generation quality. You'll use the [`StableDiffusionPipeline`] with an [IP-Adapter](./ip_adapter) to generate a bear eating pizza.
|
Let's start with a [`StableDiffusionPipeline`] and then reuse the loaded model components to create a [`StableDiffusionSAGPipeline`] to increase generation quality. You'll use the [`StableDiffusionPipeline`] with an [IP-Adapter](./ip_adapter) to generate a bear eating pizza.
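
As a minimal sketch of the idea (the model id is illustrative and the generation arguments are omitted), the second pipeline is built directly from the components the first one already holds in memory:

```py
import torch
from diffusers import StableDiffusionPipeline, StableDiffusionSAGPipeline

pipeline_sd = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# reuse the components that are already loaded instead of loading the model a second time
pipeline_sag = StableDiffusionSAGPipeline.from_pipe(pipeline_sd)
```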
|
||||||
|
|
||||||
|
|||||||
466
docs/source/en/using-diffusers/marigold_usage.md
Normal file
466
docs/source/en/using-diffusers/marigold_usage.md
Normal file
@@ -0,0 +1,466 @@
|
|||||||
|
<!--Copyright 2024 Marigold authors and The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License.
|
||||||
|
-->
|
||||||
|
|
||||||
|
# Marigold Pipelines for Computer Vision Tasks
|
||||||
|
|
||||||
|
[Marigold](../api/pipelines/marigold) is a novel diffusion-based dense prediction approach, and a set of pipelines for various computer vision tasks, such as monocular depth estimation.
|
||||||
|
|
||||||
|
This guide will show you how to use Marigold to obtain fast and high-quality predictions for images and videos.
|
||||||
|
|
||||||
|
Each pipeline supports one computer vision task, which takes an RGB image as input and produces a *prediction* of the modality of interest, such as a depth map of the input image.
|
||||||
|
Currently, the following tasks are implemented:
|
||||||
|
|
||||||
|
| Pipeline | Predicted Modalities | Demos |
|
||||||
|
|---------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------:|
|
||||||
|
| [MarigoldDepthPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py) | [Depth](https://en.wikipedia.org/wiki/Depth_map), [Disparity](https://en.wikipedia.org/wiki/Binocular_disparity) | [Fast Demo (LCM)](https://huggingface.co/spaces/prs-eth/marigold-lcm), [Slow Original Demo (DDIM)](https://huggingface.co/spaces/prs-eth/marigold) |
|
||||||
|
| [MarigoldNormalsPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py) | [Surface normals](https://en.wikipedia.org/wiki/Normal_mapping) | [Fast Demo (LCM)](https://huggingface.co/spaces/prs-eth/marigold-normals-lcm) |
|
||||||
|
|
||||||
|
The original checkpoints can be found under the [PRS-ETH](https://huggingface.co/prs-eth/) Hugging Face organization.
|
||||||
|
These checkpoints are meant to work with diffusers pipelines and the [original codebase](https://github.com/prs-eth/marigold).
|
||||||
|
The original code can also be used to train new checkpoints.
|
||||||
|
|
||||||
|
| Checkpoint | Modality | Comment |
|
||||||
|
|-----------------------------------------------------------------------------------------------|----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| [prs-eth/marigold-v1-0](https://huggingface.co/prs-eth/marigold-v1-0) | Depth | The first Marigold Depth checkpoint, which predicts *affine-invariant depth* maps. The performance of this checkpoint in benchmarks was studied in the original [paper](https://huggingface.co/papers/2312.02145). Designed to be used with the `DDIMScheduler` at inference, it requires at least 10 steps to get reliable predictions. Affine-invariant depth prediction has a range of values in each pixel between 0 (near plane) and 1 (far plane); both planes are chosen by the model as part of the inference process. See the `MarigoldImageProcessor` reference for visualization utilities. |
|
||||||
|
| [prs-eth/marigold-depth-lcm-v1-0](https://huggingface.co/prs-eth/marigold-depth-lcm-v1-0) | Depth | The fast Marigold Depth checkpoint, fine-tuned from `prs-eth/marigold-v1-0`. Designed to be used with the `LCMScheduler` at inference, it requires as little as 1 step to get reliable predictions. The prediction reliability saturates at 4 steps and declines after that. |
|
||||||
|
| [prs-eth/marigold-normals-v0-1](https://huggingface.co/prs-eth/marigold-normals-v0-1) | Normals | A preview checkpoint for the Marigold Normals pipeline. Designed to be used with the `DDIMScheduler` at inference, it requires at least 10 steps to get reliable predictions. The surface normals predictions are unit-length 3D vectors with values in the range from -1 to 1. *This checkpoint will be phased out after the release of `v1-0` version.* |
|
||||||
|
| [prs-eth/marigold-normals-lcm-v0-1](https://huggingface.co/prs-eth/marigold-normals-lcm-v0-1) | Normals | The fast Marigold Normals checkpoint, fine-tuned from `prs-eth/marigold-normals-v0-1`. Designed to be used with the `LCMScheduler` at inference, it requires as little as 1 step to get reliable predictions. The prediction reliability saturates at 4 steps and declines after that. *This checkpoint will be phased out after the release of `v1-0` version.* |
|
||||||
|
The examples below are mostly given for depth prediction, but they can be universally applied with other supported modalities.
|
||||||
|
We showcase the predictions using the same input image of Albert Einstein generated by Midjourney.
|
||||||
|
This makes it easier to compare visualizations of the predictions across various modalities and checkpoints.
|
||||||
|
|
||||||
|
<div class="flex gap-4" style="justify-content: center; width: 100%;">
|
||||||
|
<div style="flex: 1 1 50%; max-width: 50%;">
|
||||||
|
<img class="rounded-xl" src="https://marigoldmonodepth.github.io/images/einstein.jpg"/>
|
||||||
|
<figcaption class="mt-1 text-center text-sm text-gray-500">
|
||||||
|
Example input image for all Marigold pipelines
|
||||||
|
</figcaption>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
### Depth Prediction Quick Start

To get the first depth prediction, load the `prs-eth/marigold-depth-lcm-v1-0` checkpoint into the `MarigoldDepthPipeline`, put the image through the pipeline, and save the predictions:

```python
import diffusers
import torch

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
depth = pipe(image)

vis = pipe.image_processor.visualize_depth(depth.prediction)
vis[0].save("einstein_depth.png")

depth_16bit = pipe.image_processor.export_depth_to_16bit_png(depth.prediction)
depth_16bit[0].save("einstein_depth_16bit.png")
```

The visualization function for depth, [`~pipelines.marigold.marigold_image_processing.MarigoldImageProcessor.visualize_depth`], applies one of [matplotlib's colormaps](https://matplotlib.org/stable/users/explain/colors/colormaps.html) (`Spectral` by default) to map the predicted pixel values from a single-channel `[0, 1]` depth range into an RGB image.
With the `Spectral` colormap, near pixels are painted red, and far pixels are assigned a blue color.
The 16-bit PNG file stores the single-channel values mapped linearly from the `[0, 1]` range into `[0, 65535]`.
Below are the raw and the visualized predictions; as can be seen, dark areas (mustache) are easier to distinguish in the visualization:

<div class="flex gap-4">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_depth_16bit.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Predicted depth (16-bit PNG)
</figcaption>
</div>
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_depth.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Predicted depth visualization (Spectral)
</figcaption>
</div>
</div>

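If the default `Spectral` colors do not fit your use case, any other matplotlib colormap name can be passed through the `color_map` argument (the same argument the ControlNet example later in this guide uses). A minimal sketch, reusing `pipe` and `depth` from the quick start above and rendering the prediction in grayscale:

```python
# Sketch: render the same depth prediction with a different matplotlib colormap.
# "binary" produces a grayscale image; any matplotlib colormap name should work here.
vis_gray = pipe.image_processor.visualize_depth(depth.prediction, color_map="binary")
vis_gray[0].save("einstein_depth_gray.png")
```
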
### Surface Normals Prediction Quick Start

Load the `prs-eth/marigold-normals-lcm-v0-1` checkpoint into the `MarigoldNormalsPipeline`, put the image through the pipeline, and save the predictions:

```python
import diffusers
import torch

pipe = diffusers.MarigoldNormalsPipeline.from_pretrained(
    "prs-eth/marigold-normals-lcm-v0-1", variant="fp16", torch_dtype=torch.float16
).to("cuda")

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
normals = pipe(image)

vis = pipe.image_processor.visualize_normals(normals.prediction)
vis[0].save("einstein_normals.png")
```

The visualization function for normals, [`~pipelines.marigold.marigold_image_processing.MarigoldImageProcessor.visualize_normals`], maps the three-dimensional prediction with pixel values in the range `[-1, 1]` into an RGB image.
The visualization function supports flipping surface normals axes to make the visualization compatible with other choices of the frame of reference.
Conceptually, each pixel is painted according to the surface normal vector in the frame of reference, where the `X` axis points right, the `Y` axis points up, and the `Z` axis points at the viewer.
Below is the visualized prediction:

<div class="flex gap-4" style="justify-content: center; width: 100%;">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_normals.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Predicted surface normals visualization
</figcaption>
</div>
</div>

In this example, the nose tip almost certainly contains a surface point where the normal vector points straight at the viewer, meaning that its coordinates are `[0, 0, 1]`.
This vector maps to the RGB `[128, 128, 255]`, which corresponds to the violet-blue color.
Similarly, a surface normal on the cheek in the right part of the image has a large `X` component, which increases the red hue.
Points on the shoulders, pointing up with a large `Y` component, take on a green hue.
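If a downstream tool expects a different frame of reference (for example, a `Y` axis that points down), the axes can be flipped at visualization time, as mentioned above. The flag name used below (`flip_y`) is an assumption about the exact signature; check the `MarigoldImageProcessor` reference before relying on it:

```python
# Sketch: flip the Y axis of the normals visualization for a frame of reference
# where Y points down. The `flip_y` flag is assumed here; verify it against the
# MarigoldImageProcessor documentation.
vis_flipped = pipe.image_processor.visualize_normals(normals.prediction, flip_y=True)
vis_flipped[0].save("einstein_normals_flipped.png")
```
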
### Speeding up inference

The above quick start snippets are already optimized for speed: they load the LCM checkpoint, use the `fp16` variant of weights and computation, and perform just one denoising diffusion step.
The `pipe(image)` call completes in 280ms on an RTX 3090 GPU.
Internally, the input image is encoded with the Stable Diffusion VAE encoder, then the U-Net performs one denoising step, and finally, the prediction latent is decoded with the VAE decoder into pixel space.
In this case, two out of three module calls are dedicated to converting between the pixel and latent spaces of the LDM.
Because Marigold's latent space is compatible with the base Stable Diffusion, it is possible to speed up the pipeline call by more than 3x (85ms on RTX 3090) by using a [lightweight replacement of the SD VAE](../api/models/autoencoder_tiny):

```diff
import diffusers
import torch

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")

+ pipe.vae = diffusers.AutoencoderTiny.from_pretrained(
+     "madebyollin/taesd", torch_dtype=torch.float16
+ ).cuda()

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
depth = pipe(image)
```

As suggested in [Optimizations](../optimization/torch2.0#torch.compile), adding `torch.compile` may squeeze extra performance depending on the target hardware:

```diff
import diffusers
import torch

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")

+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
depth = pipe(image)
```
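The two optimizations above are independent and can be applied together. Below is a sketch of the combined snippet; keep in mind that the gain from `torch.compile` depends on the hardware and that the first call is slower because it triggers compilation:

```python
import diffusers
import torch

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")

# Swap in the lightweight Tiny AutoEncoder and compile the U-Net.
pipe.vae = diffusers.AutoencoderTiny.from_pretrained(
    "madebyollin/taesd", torch_dtype=torch.float16
).to("cuda")
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
depth = pipe(image)  # the first call compiles the U-Net and is slower than subsequent calls
```
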
## Qualitative Comparison with Depth Anything

With the above speed optimizations, Marigold delivers predictions with more details and faster than [Depth Anything](https://huggingface.co/docs/transformers/main/en/model_doc/depth_anything) with the largest checkpoint [LiheYoung/depth-anything-large-hf](https://huggingface.co/LiheYoung/depth-anything-large-hf):

<div class="flex gap-4">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_depth.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Marigold LCM fp16 with Tiny AutoEncoder
</figcaption>
</div>
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/einstein_depthanything_large.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Depth Anything Large
</figcaption>
</div>
</div>

## Maximizing Precision and Ensembling

Marigold pipelines have a built-in ensembling mechanism combining multiple predictions from different random latents.
This is a brute-force way of improving the precision of predictions, capitalizing on the generative nature of diffusion.
The ensembling path is activated automatically when the `ensemble_size` argument is set to a value greater than `1`.
When aiming for maximum precision, it makes sense to adjust `num_inference_steps` simultaneously with `ensemble_size`.
The recommended values vary across checkpoints but primarily depend on the scheduler type.
The effect of ensembling is particularly visible with surface normals:

```python
import diffusers

model_path = "prs-eth/marigold-normals-v1-0"

model_paper_kwargs = {
    diffusers.schedulers.DDIMScheduler: {
        "num_inference_steps": 10,
        "ensemble_size": 10,
    },
    diffusers.schedulers.LCMScheduler: {
        "num_inference_steps": 4,
        "ensemble_size": 5,
    },
}

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")

pipe = diffusers.MarigoldNormalsPipeline.from_pretrained(model_path).to("cuda")
pipe_kwargs = model_paper_kwargs[type(pipe.scheduler)]

normals = pipe(image, **pipe_kwargs)

vis = pipe.image_processor.visualize_normals(normals.prediction)
vis[0].save("einstein_normals.png")
```

<div class="flex gap-4">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_normals.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Surface normals, no ensembling
</figcaption>
</div>
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_normals.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Surface normals, with ensembling
</figcaption>
</div>
</div>

As can be seen, all areas with fine-grained structures, such as hair, got more conservative and on average more correct predictions.
Such a result is more suitable for precision-sensitive downstream tasks, such as 3D reconstruction.

## Quantitative Evaluation

To evaluate Marigold quantitatively in standard leaderboards and benchmarks (such as NYU, KITTI, and other datasets), follow the evaluation protocol outlined in the paper: load the full-precision fp32 model and use appropriate values for `num_inference_steps` and `ensemble_size`.
Optionally seed randomness to ensure reproducibility. Maximizing `batch_size` will deliver maximum device utilization.

```python
import diffusers
import torch

device = "cuda"
seed = 2024
model_path = "prs-eth/marigold-v1-0"

model_paper_kwargs = {
    diffusers.schedulers.DDIMScheduler: {
        "num_inference_steps": 50,
        "ensemble_size": 10,
    },
    diffusers.schedulers.LCMScheduler: {
        "num_inference_steps": 4,
        "ensemble_size": 10,
    },
}

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")

generator = torch.Generator(device=device).manual_seed(seed)
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(model_path).to(device)
pipe_kwargs = model_paper_kwargs[type(pipe.scheduler)]

depth = pipe(image, generator=generator, **pipe_kwargs)

# evaluate metrics
```

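What goes into the `# evaluate metrics` placeholder depends on the benchmark. As a minimal sketch, the helper below computes the absolute relative error (AbsRel) after a least-squares scale-and-shift alignment, which is the usual way to compare affine-invariant depth against metric ground truth; the ground-truth array and validity mask (`gt`, `valid_mask`) are assumed to come from your dataset loader:

```python
import numpy as np

def absrel(prediction: np.ndarray, gt: np.ndarray, valid_mask: np.ndarray) -> float:
    # Align the affine-invariant prediction to the ground truth with a
    # least-squares scale and shift, then compute mean(|aligned - gt| / gt).
    pred = prediction[valid_mask].astype(np.float64)
    target = gt[valid_mask].astype(np.float64)
    A = np.stack([pred, np.ones_like(pred)], axis=1)
    (scale, shift), *_ = np.linalg.lstsq(A, target, rcond=None)
    aligned = scale * pred + shift
    return float(np.mean(np.abs(aligned - target) / target))

# Example usage with the prediction from the snippet above; the shape is squeezed
# to H x W, and `gt` / `valid_mask` are assumed to be provided by the benchmark:
# pred_hw = np.asarray(depth.prediction[0]).squeeze()
# print("AbsRel:", absrel(pred_hw, gt, valid_mask))
```
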
## Using Predictive Uncertainty

The ensembling mechanism built into Marigold pipelines combines multiple predictions obtained from different random latents.
As a side effect, it can be used to quantify epistemic (model) uncertainty; simply specify `ensemble_size` greater than 1 and set `output_uncertainty=True`.
The resulting uncertainty will be available in the `uncertainty` field of the output.
It can be visualized as follows:

```python
import diffusers
import torch

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
depth = pipe(
    image,
    ensemble_size=10,  # any number greater than 1; higher values yield higher precision
    output_uncertainty=True,
)

uncertainty = pipe.image_processor.visualize_uncertainty(depth.uncertainty)
uncertainty[0].save("einstein_depth_uncertainty.png")
```

<div class="flex gap-4">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_depth_uncertainty.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Depth uncertainty
</figcaption>
</div>
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_normals_uncertainty.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Surface normals uncertainty
</figcaption>
</div>
</div>

The interpretation of uncertainty is easy: higher values (white) correspond to pixels where the model struggles to make consistent predictions.
Evidently, the depth model is the least confident around edges with discontinuities, where the object depth changes drastically.
The surface normals model is the least confident in fine-grained structures, such as hair, and in dark areas, such as the collar.
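Beyond visualization, the uncertainty map can act as a confidence filter for downstream use. A minimal sketch, assuming the default numpy outputs of the snippet above; the threshold value is purely illustrative and should be tuned for the task at hand:

```python
import numpy as np

# Squeeze the batch and channel dimensions down to plain H x W arrays.
pred_hw = np.asarray(depth.prediction[0]).squeeze()
unc_hw = np.asarray(depth.uncertainty[0]).squeeze()

# Keep only pixels where the ensemble members agree reasonably well;
# 0.05 is an illustrative threshold, not a recommended value.
confident = unc_hw < 0.05
filtered = np.where(confident, pred_hw, np.nan)
```
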
## Frame-by-frame Video Processing with Temporal Consistency

Due to Marigold's generative nature, each prediction is unique and defined by the random noise sampled for the latent initialization.
This becomes an obvious drawback compared to traditional end-to-end dense regression networks, as exemplified in the following videos:

<div class="flex gap-4">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_obama.gif"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">Input video</figcaption>
</div>
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_obama_depth_independent.gif"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">Marigold Depth applied to input video frames independently</figcaption>
</div>
</div>

To address this issue, it is possible to pass the `latents` argument to the pipelines, which defines the starting point of diffusion.
Empirically, we found that a convex combination of the very same starting point noise latent and the latent corresponding to the previous frame prediction gives sufficiently smooth results, as implemented in the snippet below:

```python
import imageio
from PIL import Image
from tqdm import tqdm
import diffusers
import torch

device = "cuda"
path_in = "obama.mp4"
path_out = "obama_depth.gif"

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to(device)
pipe.vae = diffusers.AutoencoderTiny.from_pretrained(
    "madebyollin/taesd", torch_dtype=torch.float16
).to(device)
pipe.set_progress_bar_config(disable=True)

with imageio.get_reader(path_in) as reader:
    size = reader.get_meta_data()['size']
    last_frame_latent = None
    latent_common = torch.randn(
        (1, 4, 768 * size[1] // (8 * max(size)), 768 * size[0] // (8 * max(size)))
    ).to(device=device, dtype=torch.float16)

    out = []
    for frame_id, frame in tqdm(enumerate(reader), desc="Processing Video"):
        frame = Image.fromarray(frame)
        latents = latent_common
        if last_frame_latent is not None:
            # convex combination of the common starting noise and the previous frame's latent
            latents = 0.9 * latents + 0.1 * last_frame_latent

        depth = pipe(
            frame, match_input_resolution=False, latents=latents, output_latent=True
        )
        last_frame_latent = depth.latent
        out.append(pipe.image_processor.visualize_depth(depth.prediction)[0])

    diffusers.utils.export_to_gif(out, path_out, fps=reader.get_meta_data()['fps'])
```

Here, the diffusion process starts from the given computed latent.
The pipeline call sets `output_latent=True` to expose the prediction latent (`depth.latent` above), which then contributes to the next frame's latent initialization.
The result is much more stable now:

<div class="flex gap-4">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_obama_depth_independent.gif"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">Marigold Depth applied to input video frames independently</figcaption>
</div>
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_obama_depth_consistent.gif"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">Marigold Depth with forced latents initialization</figcaption>
</div>
</div>

## Marigold for ControlNet

A very common application for depth prediction with diffusion models comes in conjunction with ControlNet.
Depth crispness plays a crucial role in obtaining high-quality results from ControlNet.
As seen in the comparisons with other methods above, Marigold excels at that task.
The snippet below demonstrates how to load an image, compute depth, and pass it into ControlNet in a compatible format:

```python
import torch
import diffusers

device = "cuda"
generator = torch.Generator(device=device).manual_seed(2024)
image = diffusers.utils.load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_depth_source.png"
)

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", torch_dtype=torch.float16, variant="fp16"
).to(device)

depth_image = pipe(image, generator=generator).prediction
depth_image = pipe.image_processor.visualize_depth(depth_image, color_map="binary")
depth_image[0].save("motorcycle_controlnet_depth.png")

controlnet = diffusers.ControlNetModel.from_pretrained(
    "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
).to(device)
pipe = diffusers.StableDiffusionXLControlNetPipeline.from_pretrained(
    "SG161222/RealVisXL_V4.0", torch_dtype=torch.float16, variant="fp16", controlnet=controlnet
).to(device)
pipe.scheduler = diffusers.DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=True)

controlnet_out = pipe(
    prompt="high quality photo of a sports bike, city",
    negative_prompt="",
    guidance_scale=6.5,
    num_inference_steps=25,
    image=depth_image,
    controlnet_conditioning_scale=0.7,
    control_guidance_end=0.7,
    generator=generator,
).images
controlnet_out[0].save("motorcycle_controlnet_out.png")
```

<div class="flex gap-4">
<div style="flex: 1 1 33%; max-width: 33%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_depth_source.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Input image
</figcaption>
</div>
<div style="flex: 1 1 33%; max-width: 33%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/motorcycle_controlnet_depth.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Depth in the format compatible with ControlNet
</figcaption>
</div>
<div style="flex: 1 1 33%; max-width: 33%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/motorcycle_controlnet_out.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
ControlNet generation, conditioned on depth and prompt: "high quality photo of a sports bike, city"
</figcaption>
</div>
</div>

Hopefully, you will find Marigold useful for solving your downstream tasks, be it as part of a broader generative workflow or for a perception task such as 3D reconstruction.

@@ -10,156 +10,86 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o
specific language governing permissions and limitations under the License.
-->

# Model files and layouts

[[open-in-colab]]

Diffusion models are saved in various file types and organized in different layouts. Diffusers stores model weights as safetensors files in *Diffusers-multifolder* layout and it also supports loading files (like safetensors and ckpt files) from a *single-file* layout which is commonly used in the diffusion ecosystem.

Each layout has its own benefits and use cases, and this guide will show you how to load the different files and layouts, and how to convert them.

## Files

PyTorch model weights are typically saved with Python's [pickle](https://docs.python.org/3/library/pickle.html) utility as ckpt or bin files. However, pickle is not secure and pickled files may contain malicious code that can be executed. This vulnerability is a serious concern given the popularity of model sharing. To address this security issue, the [Safetensors](https://hf.co/docs/safetensors) library was developed as a secure alternative to pickle, which saves models as safetensors files.

### safetensors

> [!TIP]
> Learn more about the design decisions and why safetensor files are preferred for saving and loading model weights in the [Safetensors audited as really safe and becoming the default](https://blog.eleuther.ai/safetensors-security-audit/) blog post.

[Safetensors](https://hf.co/docs/safetensors) is a safe and fast file format for securely storing and loading tensors. Safetensors restricts the header size to limit certain types of attacks, supports lazy loading (useful for distributed setups), and has generally faster loading speeds.

Make sure you have the [Safetensors](https://hf.co/docs/safetensors) library installed.

```py
!pip install safetensors
```

Safetensors stores weights in a safetensors file. Diffusers loads safetensors files by default if they're available and the Safetensors library is installed. There are two ways safetensors files can be organized:

1. Diffusers-multifolder layout: there may be several separate safetensors files, one for each pipeline component (text encoder, UNet, VAE), organized in subfolders (check out the [runwayml/stable-diffusion-v1-5](https://hf.co/runwayml/stable-diffusion-v1-5/tree/main) repository as an example)
2. single-file layout: all the model weights may be saved in a single file (check out the [WarriorMama777/OrangeMixs](https://hf.co/WarriorMama777/OrangeMixs/tree/main/Models/AbyssOrangeMix) repository as an example)

<hfoptions id="safetensors">
<hfoption id="multifolder">

Use the [`~DiffusionPipeline.from_pretrained`] method to load a model with safetensors files stored in multiple folders.

```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    use_safetensors=True
)
```

</hfoption>
<hfoption id="single file">

Use the [`~loaders.FromSingleFileMixin.from_single_file`] method to load a model with all the weights stored in a single safetensors file.

```py
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_single_file(
    "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors"
)
```

</hfoption>
</hfoptions>

#### LoRA files

[LoRA](https://hf.co/docs/peft/conceptual_guides/adapter#low-rank-adaptation-lora) is a lightweight adapter that is fast and easy to train, making it especially popular for generating images in a certain way or style. These adapters are commonly stored in a safetensors file, and are widely popular on model sharing platforms like [civitai](https://civitai.com/).

LoRAs are loaded into a base model with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method.

```py
from diffusers import StableDiffusionXLPipeline
import torch

# base model
pipeline = StableDiffusionXLPipeline.from_pretrained(
    "Lykon/dreamshaper-xl-1-0", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

# download LoRA weights
!wget https://civitai.com/api/download/models/168776 -O blueprintify.safetensors

# load LoRA weights
pipeline.load_lora_weights(".", weight_name="blueprintify.safetensors")

prompt = "bl3uprint, a highly detailed blueprint of the empire state building, explaining how to build all parts, many txt, blueprint grid backdrop"
negative_prompt = "lowres, cropped, worst quality, low quality, normal quality, artifacts, signature, watermark, username, blurry, more than one bridge, bad architecture"

@@ -174,3 +104,378 @@ image
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/blueprint-lora.png"/>
</div>

### ckpt

> [!WARNING]
> Pickled files may be unsafe because they can be exploited to execute malicious code. It is recommended to use safetensors files instead where possible, or convert the weights to safetensors files.

PyTorch's [torch.save](https://pytorch.org/docs/stable/generated/torch.save.html) function uses Python's [pickle](https://docs.python.org/3/library/pickle.html) utility to serialize and save models. These files are saved as a ckpt file and they contain the entire model's weights.

Use the [`~loaders.FromSingleFileMixin.from_single_file`] method to directly load a ckpt file.

```py
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_single_file(
    "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.ckpt"
)
```

## Storage layout

There are two ways model files are organized, either in a Diffusers-multifolder layout or in a single-file layout. The Diffusers-multifolder layout is the default, and each component file (text encoder, UNet, VAE) is stored in a separate subfolder. Diffusers also supports loading models from a single-file layout where all the components are bundled together.

### Diffusers-multifolder

The Diffusers-multifolder layout is the default storage layout for Diffusers. Each component's (text encoder, UNet, VAE) weights are stored in a separate subfolder. The weights can be stored as safetensors or ckpt files.

<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/multifolder-layout.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">multifolder layout</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/multifolder-unet.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">UNet subfolder</figcaption>
</div>
</div>

To load from Diffusers-multifolder layout, use the [`~DiffusionPipeline.from_pretrained`] method.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")
```

Benefits of using the Diffusers-multifolder layout include:

1. Faster to load each component file individually or in parallel.
2. Reduced memory usage because you only load the components you need. For example, models like [SDXL Turbo](https://hf.co/stabilityai/sdxl-turbo), [SDXL Lightning](https://hf.co/ByteDance/SDXL-Lightning), and [Hyper-SD](https://hf.co/ByteDance/Hyper-SD) have the same components except for the UNet. You can reuse their shared components with the [`~DiffusionPipeline.from_pipe`] method without consuming any additional memory (take a look at the [Reuse a pipeline](./loading#reuse-a-pipeline) guide) and only load the UNet. This way, you don't need to download redundant components and unnecessarily use more memory.

   ```py
   import torch
   from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler

   # download one model
   sdxl_pipeline = StableDiffusionXLPipeline.from_pretrained(
       "stabilityai/stable-diffusion-xl-base-1.0",
       torch_dtype=torch.float16,
       variant="fp16",
       use_safetensors=True,
   ).to("cuda")

   # switch UNet for another model
   unet = UNet2DConditionModel.from_pretrained(
       "stabilityai/sdxl-turbo",
       subfolder="unet",
       torch_dtype=torch.float16,
       variant="fp16",
       use_safetensors=True
   )
   # reuse all the same components in new model except for the UNet
   turbo_pipeline = StableDiffusionXLPipeline.from_pipe(
       sdxl_pipeline, unet=unet,
   ).to("cuda")
   turbo_pipeline.scheduler = EulerDiscreteScheduler.from_config(
       turbo_pipeline.scheduler.config,
       timestep_spacing="trailing"
   )
   image = turbo_pipeline(
       "an astronaut riding a unicorn on mars",
       num_inference_steps=1,
       guidance_scale=0.0,
   ).images[0]
   image
   ```

3. Reduced storage requirements because if a component, such as the SDXL [VAE](https://hf.co/madebyollin/sdxl-vae-fp16-fix), is shared across multiple models, you only need to download and store a single copy of it instead of downloading and storing it multiple times. For 10 SDXL models, this can save ~3.5GB of storage. The storage savings is even greater for newer models like PixArt Sigma, where the [text encoder](https://hf.co/PixArt-alpha/PixArt-Sigma-XL-2-1024-MS/tree/main/text_encoder) alone is ~19GB!
4. Flexibility to replace a component in the model with a newer or better version.

   ```py
   import torch
   from diffusers import DiffusionPipeline, AutoencoderKL

   vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True)
   pipeline = DiffusionPipeline.from_pretrained(
       "stabilityai/stable-diffusion-xl-base-1.0",
       vae=vae,
       torch_dtype=torch.float16,
       variant="fp16",
       use_safetensors=True,
   ).to("cuda")
   ```

5. More visibility and information about a model's components, which are stored in a [config.json](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/unet/config.json) file in each component subfolder.

### Single-file

The single-file layout stores all the model weights in a single file. All the model component weights (text encoder, UNet, VAE) are kept together instead of separately in subfolders. This can be a safetensors or ckpt file.

<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/single-file-layout.png"/>
</div>

To load from a single-file layout, use the [`~loaders.FromSingleFileMixin.from_single_file`] method.

```py
import torch
from diffusers import StableDiffusionXLPipeline

pipeline = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")
```

Benefits of using a single-file layout include:

1. Easy compatibility with diffusion interfaces such as [ComfyUI](https://github.com/comfyanonymous/ComfyUI) or [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) which commonly use a single-file layout.
2. Easier to manage (download and share) a single file.

## Convert layout and files

Diffusers provides many scripts and methods to convert storage layouts and file formats to enable broader support across the diffusion ecosystem.

Take a look at the [diffusers/scripts](https://github.com/huggingface/diffusers/tree/main/scripts) collection to find a script that fits your conversion needs.

> [!TIP]
> Scripts that have "`to_diffusers`" appended at the end mean they convert a model to the Diffusers-multifolder layout. Each script has its own specific set of arguments for configuring the conversion, so make sure you check what arguments are available!

For example, to convert a Stable Diffusion XL model stored in Diffusers-multifolder layout to a single-file layout, run the [convert_diffusers_to_original_sdxl.py](https://github.com/huggingface/diffusers/blob/main/scripts/convert_diffusers_to_original_sdxl.py) script. Provide the path to the model to convert, and the path to save the converted model to. You can optionally specify whether you want to save the model as a safetensors file and whether to save the model in half-precision.

```bash
python convert_diffusers_to_original_sdxl.py --model_path path/to/model/to/convert --checkpoint_path path/to/save/model/to --use_safetensors
```

You can also save a model to Diffusers-multifolder layout with the [`~DiffusionPipeline.save_pretrained`] method. This creates a directory for you if it doesn't already exist, and it also saves the files as safetensors files by default.

```py
from diffusers import StableDiffusionXLPipeline

pipeline = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors",
)
# pass the directory to save the converted model to
pipeline.save_pretrained("path/to/save/model")
```

Lastly, there are also Spaces, such as [SD To Diffusers](https://hf.co/spaces/diffusers/sd-to-diffusers) and [SD-XL To Diffusers](https://hf.co/spaces/diffusers/sdxl-to-diffusers), that provide a more user-friendly interface for converting models to Diffusers-multifolder layout. This is the easiest and most convenient option for converting layouts, and it'll open a PR on your model repository with the converted files. However, this option is not as reliable as running a script, and the Space may fail for more complicated models.

## Single-file layout usage
|
||||||
|
|
||||||
|
Now that you're familiar with the differences between the Diffusers-multifolder and single-file layout, this section shows you how to load models and pipeline components, customize configuration options for loading, and load local files with the [`~loaders.FromSingleFileMixin.from_single_file`] method.
|
||||||
|
|
||||||
|
### Load a pipeline or model
|
||||||
|
|
||||||
|
Pass the file path of the pipeline or model to the [`~loaders.FromSingleFileMixin.from_single_file`] method to load it.
|
||||||
|
|
||||||
|
<hfoptions id="pipeline-model">
|
||||||
|
<hfoption id="pipeline">
|
||||||
|
|
||||||
|
```py
|
||||||
|
from diffusers import StableDiffusionXLPipeline
|
||||||
|
|
||||||
|
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors"
|
||||||
|
pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path)
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="model">
|
||||||
|
|
||||||
|
```py
|
||||||
|
from diffusers import StableCascadeUNet
|
||||||
|
|
||||||
|
ckpt_path = "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_lite.safetensors"
|
||||||
|
model = StableCascadeUNet.from_single_file(ckpt_path)
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
|
Customize components in the pipeline by passing them directly to the [`~loaders.FromSingleFileMixin.from_single_file`] method. For example, you can use a different scheduler in a pipeline.
|
||||||
|
|
||||||
|
```py
|
||||||
|
from diffusers import StableDiffusionXLPipeline, DDIMScheduler
|
||||||
|
|
||||||
|
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors"
|
||||||
|
scheduler = DDIMScheduler()
|
||||||
|
pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path, scheduler=scheduler)
|
||||||
|
```
|
||||||
|
|
||||||
|
Or you could use a ControlNet model in the pipeline.
|
||||||
|
|
||||||
|
```py
|
||||||
|
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
|
||||||
|
|
||||||
|
ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
|
||||||
|
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
|
||||||
|
pipeline = StableDiffusionControlNetPipeline.from_single_file(ckpt_path, controlnet=controlnet)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Customize configuration options
|
||||||
|
|
||||||
|
Models have a configuration file that define their attributes like the number of inputs in a UNet. Pipelines configuration options are available in the pipeline's class. For example, if you look at the [`StableDiffusionXLInstructPix2PixPipeline`] class, there is an option to scale the image latents with the `is_cosxl_edit` parameter.
|
||||||
|
|
||||||
|
These configuration files can be found in the models Hub repository or another location from which the configuration file originated (for example, a GitHub repository or locally on your device).
|
||||||
|
|
||||||
|
<hfoptions id="config-file">
|
||||||
|
<hfoption id="Hub configuration file">
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> The [`~loaders.FromSingleFileMixin.from_single_file`] method automatically maps the checkpoint to the appropriate model repository, but there are cases where it is useful to use the `config` parameter. For example, if the model components in the checkpoint are different from the original checkpoint or if a checkpoint doesn't have the necessary metadata to correctly determine the configuration to use for the pipeline.
|
||||||
|
|
||||||
|
The [`~loaders.FromSingleFileMixin.from_single_file`] method automatically determines the configuration to use from the configuration file in the model repository. You could also explicitly specify the configuration to use by providing the repository id to the `config` parameter.
|
||||||
|
|
||||||
|
```py
|
||||||
|
from diffusers import StableDiffusionXLPipeline
|
||||||
|
|
||||||
|
ckpt_path = "https://huggingface.co/segmind/SSD-1B/blob/main/SSD-1B.safetensors"
|
||||||
|
repo_id = "segmind/SSD-1B"
|
||||||
|
|
||||||
|
pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path, config=repo_id)
|
||||||
|
```
|
||||||
|
|
||||||
|
The model loads the configuration file for the [UNet](https://huggingface.co/segmind/SSD-1B/blob/main/unet/config.json), [VAE](https://huggingface.co/segmind/SSD-1B/blob/main/vae/config.json), and [text encoder](https://huggingface.co/segmind/SSD-1B/blob/main/text_encoder/config.json) from their respective subfolders in the repository.
</hfoption>
|
||||||
|
<hfoption id="original configuration file">
|
||||||
|
|
||||||
|
The [`~loaders.FromSingleFileMixin.from_single_file`] method can also load the original configuration file of a pipeline that is stored elsewhere. Pass a local path or URL of the original configuration file to the `original_config` parameter.
|
||||||
|
|
||||||
|
```py
|
||||||
|
from diffusers import StableDiffusionXLPipeline
|
||||||
|
|
||||||
|
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors"
|
||||||
|
original_config = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
|
||||||
|
|
||||||
|
pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path, original_config=original_config)
|
||||||
|
```
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> Diffusers attempts to infer the pipeline components based on the type signatures of the pipeline class when you use `original_config` with `local_files_only=True`, instead of fetching the configuration files from the model repository on the Hub. This prevents backward breaking changes in code that can't connect to the internet to fetch the necessary configuration files.
|
||||||
|
>
|
||||||
|
> This is not as reliable as providing a path to a local model repository with the `config` parameter, and might lead to errors during pipeline configuration. To avoid errors, run the pipeline with `local_files_only=False` once to download the appropriate pipeline configuration files to the local cache.
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
|
While the configuration files specify the pipeline or models default parameters, you can override them by providing the parameters directly to the [`~loaders.FromSingleFileMixin.from_single_file`] method. Any parameter supported by the model or pipeline class can be configured in this way.
|
||||||
|
|
||||||
|
<hfoptions id="override">
|
||||||
|
<hfoption id="pipeline">
|
||||||
|
|
||||||
|
For example, to scale the image latents in [`StableDiffusionXLInstructPix2PixPipeline`] pass the `is_cosxl_edit` parameter.
|
||||||
|
|
||||||
|
```python
|
||||||
|
from diffusers import StableDiffusionXLInstructPix2PixPipeline
|
||||||
|
|
||||||
|
ckpt_path = "https://huggingface.co/stabilityai/cosxl/blob/main/cosxl_edit.safetensors"
|
||||||
|
pipeline = StableDiffusionXLInstructPix2PixPipeline.from_single_file(ckpt_path, config="diffusers/sdxl-instructpix2pix-768", is_cosxl_edit=True)
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="model">
|
||||||
|
|
||||||
|
For example, to upcast the attention dimensions in a [`UNet2DConditionModel`] pass the `upcast_attention` parameter.
|
||||||
|
|
||||||
|
```python
|
||||||
|
from diffusers import UNet2DConditionModel
|
||||||
|
|
||||||
|
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors"
|
||||||
|
model = UNet2DConditionModel.from_single_file(ckpt_path, upcast_attention=True)
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
|
### Local files
|
||||||
|
|
||||||
|
In Diffusers>=v0.28.0, the [`~loaders.FromSingleFileMixin.from_single_file`] method attempts to configure a pipeline or model by inferring the model type from the keys in the checkpoint file. The inferred model type is used to determine the appropriate model repository on the Hugging Face Hub to configure the model or pipeline.
|
||||||
|
|
||||||
|
For example, any single file checkpoint based on the Stable Diffusion XL base model will use the [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) model repository to configure the pipeline.
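
If you're curious what that inference keys off of, you can list the tensor names stored in a single file checkpoint yourself. The sketch below is only illustrative (it is not the library's internal logic) and downloads the checkpoint first so the keys can be read locally.

```python
from huggingface_hub import hf_hub_download
from safetensors import safe_open

# Download the checkpoint, then print a few tensor names; patterns in these keys
# are what from_single_file matches against known model types.
ckpt_path = hf_hub_download("segmind/SSD-1B", filename="SSD-1B.safetensors")
with safe_open(ckpt_path, framework="pt") as f:
    print(list(f.keys())[:5])
```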
But if you're working in an environment with restricted internet access, you should download the configuration files with the [`~huggingface_hub.snapshot_download`] function, and the model checkpoint with the [`~huggingface_hub.hf_hub_download`] function. By default, these files are downloaded to the Hugging Face Hub [cache directory](https://huggingface.co/docs/huggingface_hub/en/guides/manage-cache), but you can specify a preferred directory to download the files to with the `local_dir` parameter.

Pass the configuration and checkpoint paths to the [`~loaders.FromSingleFileMixin.from_single_file`] method to load locally.

<hfoptions id="local">
<hfoption id="Hub cache directory">

```python
from diffusers import StableDiffusionXLPipeline
from huggingface_hub import hf_hub_download, snapshot_download

my_local_checkpoint_path = hf_hub_download(
    repo_id="segmind/SSD-1B",
    filename="SSD-1B.safetensors"
)

my_local_config_path = snapshot_download(
    repo_id="segmind/SSD-1B",
    allow_patterns=["*.json", "**/*.json", "*.txt", "**/*.txt"]
)

pipeline = StableDiffusionXLPipeline.from_single_file(my_local_checkpoint_path, config=my_local_config_path, local_files_only=True)
```

</hfoption>
<hfoption id="specific local directory">

```python
from diffusers import StableDiffusionXLPipeline
from huggingface_hub import hf_hub_download, snapshot_download

my_local_checkpoint_path = hf_hub_download(
    repo_id="segmind/SSD-1B",
    filename="SSD-1B.safetensors",
    local_dir="my_local_checkpoints"
)

my_local_config_path = snapshot_download(
    repo_id="segmind/SSD-1B",
    allow_patterns=["*.json", "**/*.json", "*.txt", "**/*.txt"],
    local_dir="my_local_config"
)

pipeline = StableDiffusionXLPipeline.from_single_file(my_local_checkpoint_path, config=my_local_config_path, local_files_only=True)
```

</hfoption>
</hfoptions>

#### Local files without symlink

> [!TIP]
> In huggingface_hub>=v0.23.0, the `local_dir_use_symlinks` argument isn't necessary for the [`~huggingface_hub.hf_hub_download`] and [`~huggingface_hub.snapshot_download`] functions.

The [`~loaders.FromSingleFileMixin.from_single_file`] method relies on the [huggingface_hub](https://hf.co/docs/huggingface_hub/index) caching mechanism to fetch and store checkpoints and configuration files for models and pipelines. If you're working with a file system that does not support symlinking, you should download the checkpoint file to a local directory first, and disable symlinking with the `local_dir_use_symlinks=False` parameter in the [`~huggingface_hub.hf_hub_download`] and [`~huggingface_hub.snapshot_download`] functions.

```python
from huggingface_hub import hf_hub_download, snapshot_download

my_local_checkpoint_path = hf_hub_download(
    repo_id="segmind/SSD-1B",
    filename="SSD-1B.safetensors",
    local_dir="my_local_checkpoints",
    local_dir_use_symlinks=False
)
print("My local checkpoint: ", my_local_checkpoint_path)

my_local_config_path = snapshot_download(
    repo_id="segmind/SSD-1B",
    allow_patterns=["*.json", "**/*.json", "*.txt", "**/*.txt"],
    local_dir="my_local_config",
    local_dir_use_symlinks=False,
)
print("My local config: ", my_local_config_path)
```

Then you can pass the local paths to the `pretrained_model_link_or_path` and `config` parameters.

```python
from diffusers import StableDiffusionXLPipeline

pipeline = StableDiffusionXLPipeline.from_single_file(my_local_checkpoint_path, config=my_local_config_path, local_files_only=True)
```