mirror of
https://github.com/huggingface/diffusers.git
synced 2025-12-09 22:14:43 +08:00
Compare commits
206 Commits
v0.16.1-pa
...
v0.17.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6bbde99c4a | ||
|
|
5916743b22 | ||
|
|
7ddc4a1a9f | ||
|
|
cd9d0913d9 | ||
|
|
fdec23188a | ||
|
|
12a232efa9 | ||
|
|
74fd735eb0 | ||
|
|
2de9e2df36 | ||
|
|
11b3002b48 | ||
|
|
10f4ecd177 | ||
|
|
de16f64667 | ||
|
|
017ee1609b | ||
|
|
8669e8313d | ||
|
|
b45204ea5a | ||
|
|
a8b0f42c38 | ||
|
|
41ae670828 | ||
|
|
462956be7b | ||
|
|
5990014700 | ||
|
|
1a6a647e06 | ||
|
|
995bbcb9aa | ||
|
|
d0416ab090 | ||
|
|
1994dbcb5e | ||
|
|
262d539a8a | ||
|
|
0fc2fb71c1 | ||
|
|
523a50a8eb | ||
|
|
de45af4a46 | ||
|
|
b95cbdf6fc | ||
|
|
7a39691362 | ||
|
|
5911a3aa47 | ||
|
|
b7af946138 | ||
|
|
d3717e6368 | ||
|
|
0dbdc0cbae | ||
|
|
0e8688113a | ||
|
|
f1d4743394 | ||
|
|
a6c7b5b6b7 | ||
|
|
8e552bb4fe | ||
|
|
32ea2142c0 | ||
|
|
55dbfa0229 | ||
|
|
4f14b36329 | ||
|
|
f751b8844e | ||
|
|
abb89da4de | ||
|
|
7d0ac4eeab | ||
|
|
0cc3a7a123 | ||
|
|
9d3ff0794d | ||
|
|
a359ab4e29 | ||
|
|
160c377ddc | ||
|
|
bb22d546c0 | ||
|
|
799f5b4e12 | ||
|
|
07ef4855cd | ||
|
|
6cbddf558a | ||
|
|
35a740427e | ||
|
|
0612f48cd0 | ||
|
|
c059cc0992 | ||
|
|
c0f867afd1 | ||
|
|
c6ae883751 | ||
|
|
5559d04237 | ||
|
|
9917c32916 | ||
|
|
ab986769f1 | ||
|
|
bdc75e753d | ||
|
|
1d1f648c6b | ||
|
|
67cf0445ef | ||
|
|
352ca3198c | ||
|
|
7948db81c5 | ||
|
|
bf16a97018 | ||
|
|
66356e7dd5 | ||
|
|
ffa33d631a | ||
|
|
d8ce53a8c4 | ||
|
|
d114d80fd2 | ||
|
|
e5215dee9a | ||
|
|
03b7a84cbe | ||
|
|
f19f128735 | ||
|
|
a94977b8b3 | ||
|
|
8e69708b0d | ||
|
|
db56f8a4f5 | ||
|
|
c13dbd5c3a | ||
|
|
bde2cb5d9b | ||
|
|
abab61d49e | ||
|
|
b402604de4 | ||
|
|
84ce50f08e | ||
|
|
9e2734a710 | ||
|
|
d4197bf4d7 | ||
|
|
b134f6a8b6 | ||
|
|
edc6505193 | ||
|
|
2f997f30ab | ||
|
|
67cd460154 | ||
|
|
64bf5d33b7 | ||
|
|
c4359d63e3 | ||
|
|
f3d570c273 | ||
|
|
2b56e8ca68 | ||
|
|
b8b5daaee3 | ||
|
|
229fd8cbca | ||
|
|
a2874af297 | ||
|
|
0160e5146f | ||
|
|
194b0a425d | ||
|
|
6dd3871ae0 | ||
|
|
51843fd7d0 | ||
|
|
49ad61c204 | ||
|
|
4bbc51d94d | ||
|
|
f7b4f51cc2 | ||
|
|
85eff637aa | ||
|
|
e589bdb956 | ||
|
|
00c76f6ff1 | ||
|
|
e343443565 | ||
|
|
8d646f2294 | ||
|
|
8917769499 | ||
|
|
49b7ccfb96 | ||
|
|
7200985eab | ||
|
|
c9f939bf98 | ||
|
|
2858d7e15e | ||
|
|
88295f92d9 | ||
|
|
2faf91dbde | ||
|
|
bd78f63a54 | ||
|
|
3ebd2d1f9e | ||
|
|
15f1bab13b | ||
|
|
415c616712 | ||
|
|
c09c4f3ab7 | ||
|
|
6070b32fcf | ||
|
|
0392eceba8 | ||
|
|
92ea5baca2 | ||
|
|
754fac82d2 | ||
|
|
17f9aed79c | ||
|
|
886575ee43 | ||
|
|
9d44e2fb66 | ||
|
|
d2285f5158 | ||
|
|
326f326e17 | ||
|
|
29b1325a5a | ||
|
|
7a32b6beeb | ||
|
|
bdefabd1a8 | ||
|
|
909742dbd6 | ||
|
|
28f404349d | ||
|
|
03e5126978 | ||
|
|
b1b92f4a98 | ||
|
|
7f6373d264 | ||
|
|
3a237f4fa2 | ||
|
|
1a5797c6d4 | ||
|
|
f92253015c | ||
|
|
58c6f9cb71 | ||
|
|
af2a237676 | ||
|
|
d71db894eb | ||
|
|
90f5f3c4d4 | ||
|
|
01c056f094 | ||
|
|
e0b56d2b18 | ||
|
|
f740d357c9 | ||
|
|
5e746753d6 | ||
|
|
c49e9ede4d | ||
|
|
82e6fa56f0 | ||
|
|
edb087a217 | ||
|
|
94a0c644a8 | ||
|
|
26832aa5ef | ||
|
|
c559479592 | ||
|
|
a757b2db6e | ||
|
|
571bc1ea11 | ||
|
|
f381402ec8 | ||
|
|
3d8b3d7cd8 | ||
|
|
0ffac97933 | ||
|
|
b0966f5801 | ||
|
|
0407c3e7d0 | ||
|
|
7ce3fa010a | ||
|
|
abd86d1c17 | ||
|
|
e9aa0925a8 | ||
|
|
36f43ea75a | ||
|
|
27522b585b | ||
|
|
8d4c7d0ea0 | ||
|
|
29ad75dc3b | ||
|
|
379197a2f0 | ||
|
|
79c0e24a14 | ||
|
|
fa9e35fca4 | ||
|
|
4bae76e453 | ||
|
|
022479416f | ||
|
|
2dd408504a | ||
|
|
79bd909dbd | ||
|
|
63a8ef7b73 | ||
|
|
0ccad2ad2d | ||
|
|
efc48da23b | ||
|
|
5c7a35a259 | ||
|
|
a7f25b4a88 | ||
|
|
0e82fb19e1 | ||
|
|
709cf554f6 | ||
|
|
536684eb2f | ||
|
|
384c83aa9a | ||
|
|
14b460614b | ||
|
|
4d35d7fea3 | ||
|
|
a7b0671c07 | ||
|
|
be0bfcec4d | ||
|
|
d464214464 | ||
|
|
6290668254 | ||
|
|
73cc43109b | ||
|
|
0614fd2038 | ||
|
|
462b4edd31 | ||
|
|
71de5b7051 | ||
|
|
256e6960cb | ||
|
|
329d1df8f2 | ||
|
|
364d59d13b | ||
|
|
2ced899cc7 | ||
|
|
b63419a28a | ||
|
|
eb29dbad17 | ||
|
|
d92c4d5ab7 | ||
|
|
eade4308da | ||
|
|
fa31da29e5 | ||
|
|
77bfb56241 | ||
|
|
70ef774fa0 | ||
|
|
0b64c2c6c3 | ||
|
|
fd512d7461 | ||
|
|
e0a2bd15f9 | ||
|
|
c399de396d | ||
|
|
f842396367 |
4
.github/actions/setup-miniconda/action.yml
vendored
4
.github/actions/setup-miniconda/action.yml
vendored
@@ -27,7 +27,7 @@ runs:
|
||||
- name: Get date
|
||||
id: get-date
|
||||
shell: bash
|
||||
run: echo "::set-output name=today::$(/bin/date -u '+%Y%m%d')d"
|
||||
run: echo "today=$(/bin/date -u '+%Y%m%d')d" >> $GITHUB_OUTPUT
|
||||
- name: Setup miniconda cache
|
||||
id: miniconda-cache
|
||||
uses: actions/cache@v2
|
||||
@@ -143,4 +143,4 @@ runs:
|
||||
echo "There is ${AVAIL}KB free space left in $MOUNT, continue"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
2
.github/workflows/build_documentation.yml
vendored
2
.github/workflows/build_documentation.yml
vendored
@@ -5,7 +5,7 @@ on:
|
||||
branches:
|
||||
- main
|
||||
- doc-builder*
|
||||
- v*-release
|
||||
- v*-patch
|
||||
|
||||
jobs:
|
||||
build:
|
||||
|
||||
68
.github/workflows/pr_tests.yml
vendored
68
.github/workflows/pr_tests.yml
vendored
@@ -36,11 +36,6 @@ jobs:
|
||||
runner: docker-cpu
|
||||
image: diffusers/diffusers-flax-cpu
|
||||
report: flax_cpu
|
||||
- name: Fast ONNXRuntime CPU tests
|
||||
framework: onnxruntime
|
||||
runner: docker-cpu
|
||||
image: diffusers/diffusers-onnxruntime-cpu
|
||||
report: onnx_cpu
|
||||
- name: PyTorch Example CPU tests
|
||||
framework: pytorch_examples
|
||||
runner: docker-cpu
|
||||
@@ -69,8 +64,6 @@ jobs:
|
||||
run: |
|
||||
apt-get update && apt-get install libsndfile1-dev -y
|
||||
python -m pip install -e .[quality,test]
|
||||
python -m pip install -U git+https://github.com/huggingface/transformers
|
||||
python -m pip install git+https://github.com/huggingface/accelerate
|
||||
|
||||
- name: Environment
|
||||
run: |
|
||||
@@ -100,14 +93,6 @@ jobs:
|
||||
--make-reports=tests_${{ matrix.config.report }} \
|
||||
tests
|
||||
|
||||
- name: Run fast ONNXRuntime CPU tests
|
||||
if: ${{ matrix.config.framework == 'onnxruntime' }}
|
||||
run: |
|
||||
python -m pytest -n 2 --max-worker-restart=0 --dist=loadfile \
|
||||
-s -v -k "Onnx" \
|
||||
--make-reports=tests_${{ matrix.config.report }} \
|
||||
tests/
|
||||
|
||||
- name: Run example PyTorch CPU tests
|
||||
if: ${{ matrix.config.framework == 'pytorch_examples' }}
|
||||
run: |
|
||||
@@ -125,56 +110,3 @@ jobs:
|
||||
with:
|
||||
name: pr_${{ matrix.config.report }}_test_reports
|
||||
path: reports
|
||||
|
||||
run_fast_tests_apple_m1:
|
||||
name: Fast PyTorch MPS tests on MacOS
|
||||
runs-on: [ self-hosted, apple-m1 ]
|
||||
|
||||
steps:
|
||||
- name: Checkout diffusers
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Clean checkout
|
||||
shell: arch -arch arm64 bash {0}
|
||||
run: |
|
||||
git clean -fxd
|
||||
|
||||
- name: Setup miniconda
|
||||
uses: ./.github/actions/setup-miniconda
|
||||
with:
|
||||
python-version: 3.9
|
||||
|
||||
- name: Install dependencies
|
||||
shell: arch -arch arm64 bash {0}
|
||||
run: |
|
||||
${CONDA_RUN} python -m pip install --upgrade pip
|
||||
${CONDA_RUN} python -m pip install -e .[quality,test]
|
||||
${CONDA_RUN} python -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
${CONDA_RUN} python -m pip install git+https://github.com/huggingface/accelerate
|
||||
${CONDA_RUN} python -m pip install -U git+https://github.com/huggingface/transformers
|
||||
|
||||
- name: Environment
|
||||
shell: arch -arch arm64 bash {0}
|
||||
run: |
|
||||
${CONDA_RUN} python utils/print_env.py
|
||||
|
||||
- name: Run fast PyTorch tests on M1 (MPS)
|
||||
shell: arch -arch arm64 bash {0}
|
||||
env:
|
||||
HF_HOME: /System/Volumes/Data/mnt/cache
|
||||
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
|
||||
run: |
|
||||
${CONDA_RUN} python -m pytest -n 0 -s -v --make-reports=tests_torch_mps tests/
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
run: cat reports/tests_torch_mps_failures_short.txt
|
||||
|
||||
- name: Test suite reports artifacts
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: pr_torch_mps_test_reports
|
||||
path: reports
|
||||
|
||||
8
.github/workflows/push_tests.yml
vendored
8
.github/workflows/push_tests.yml
vendored
@@ -17,6 +17,7 @@ jobs:
|
||||
run_slow_tests:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
max-parallel: 1
|
||||
matrix:
|
||||
config:
|
||||
- name: Slow PyTorch CUDA tests on Ubuntu
|
||||
@@ -61,8 +62,6 @@ jobs:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install -e .[quality,test]
|
||||
python -m pip install -U git+https://github.com/huggingface/transformers
|
||||
python -m pip install git+https://github.com/huggingface/accelerate
|
||||
|
||||
- name: Environment
|
||||
run: |
|
||||
@@ -72,6 +71,9 @@ jobs:
|
||||
if: ${{ matrix.config.framework == 'pytorch' }}
|
||||
env:
|
||||
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
|
||||
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||
|
||||
run: |
|
||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||
-s -v -k "not Flax and not Onnx" \
|
||||
@@ -131,8 +133,6 @@ jobs:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install -e .[quality,test,training]
|
||||
python -m pip install git+https://github.com/huggingface/accelerate
|
||||
python -m pip install -U git+https://github.com/huggingface/transformers
|
||||
|
||||
- name: Environment
|
||||
run: |
|
||||
|
||||
57
.github/workflows/push_tests_fast.yml
vendored
57
.github/workflows/push_tests_fast.yml
vendored
@@ -1,4 +1,4 @@
|
||||
name: Slow tests on main
|
||||
name: Fast tests on main
|
||||
|
||||
on:
|
||||
push:
|
||||
@@ -62,8 +62,6 @@ jobs:
|
||||
run: |
|
||||
apt-get update && apt-get install libsndfile1-dev -y
|
||||
python -m pip install -e .[quality,test]
|
||||
python -m pip install -U git+https://github.com/huggingface/transformers
|
||||
python -m pip install git+https://github.com/huggingface/accelerate
|
||||
|
||||
- name: Environment
|
||||
run: |
|
||||
@@ -110,56 +108,3 @@ jobs:
|
||||
with:
|
||||
name: pr_${{ matrix.config.report }}_test_reports
|
||||
path: reports
|
||||
|
||||
run_fast_tests_apple_m1:
|
||||
name: Fast PyTorch MPS tests on MacOS
|
||||
runs-on: [ self-hosted, apple-m1 ]
|
||||
|
||||
steps:
|
||||
- name: Checkout diffusers
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Clean checkout
|
||||
shell: arch -arch arm64 bash {0}
|
||||
run: |
|
||||
git clean -fxd
|
||||
|
||||
- name: Setup miniconda
|
||||
uses: ./.github/actions/setup-miniconda
|
||||
with:
|
||||
python-version: 3.9
|
||||
|
||||
- name: Install dependencies
|
||||
shell: arch -arch arm64 bash {0}
|
||||
run: |
|
||||
${CONDA_RUN} python -m pip install --upgrade pip
|
||||
${CONDA_RUN} python -m pip install -e .[quality,test]
|
||||
${CONDA_RUN} python -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
${CONDA_RUN} python -m pip install git+https://github.com/huggingface/accelerate
|
||||
${CONDA_RUN} python -m pip install -U git+https://github.com/huggingface/transformers
|
||||
|
||||
- name: Environment
|
||||
shell: arch -arch arm64 bash {0}
|
||||
run: |
|
||||
${CONDA_RUN} python utils/print_env.py
|
||||
|
||||
- name: Run fast PyTorch tests on M1 (MPS)
|
||||
shell: arch -arch arm64 bash {0}
|
||||
env:
|
||||
HF_HOME: /System/Volumes/Data/mnt/cache
|
||||
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
|
||||
run: |
|
||||
${CONDA_RUN} python -m pytest -n 0 -s -v --make-reports=tests_torch_mps tests/
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
run: cat reports/tests_torch_mps_failures_short.txt
|
||||
|
||||
- name: Test suite reports artifacts
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: pr_torch_mps_test_reports
|
||||
path: reports
|
||||
|
||||
68
.github/workflows/push_tests_mps.yml
vendored
Normal file
68
.github/workflows/push_tests_mps.yml
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
name: Fast mps tests on main
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
env:
|
||||
DIFFUSERS_IS_CI: yes
|
||||
HF_HOME: /mnt/cache
|
||||
OMP_NUM_THREADS: 8
|
||||
MKL_NUM_THREADS: 8
|
||||
PYTEST_TIMEOUT: 600
|
||||
RUN_SLOW: no
|
||||
|
||||
jobs:
|
||||
run_fast_tests_apple_m1:
|
||||
name: Fast PyTorch MPS tests on MacOS
|
||||
runs-on: [ self-hosted, apple-m1 ]
|
||||
|
||||
steps:
|
||||
- name: Checkout diffusers
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Clean checkout
|
||||
shell: arch -arch arm64 bash {0}
|
||||
run: |
|
||||
git clean -fxd
|
||||
|
||||
- name: Setup miniconda
|
||||
uses: ./.github/actions/setup-miniconda
|
||||
with:
|
||||
python-version: 3.9
|
||||
|
||||
- name: Install dependencies
|
||||
shell: arch -arch arm64 bash {0}
|
||||
run: |
|
||||
${CONDA_RUN} python -m pip install --upgrade pip
|
||||
${CONDA_RUN} python -m pip install -e .[quality,test]
|
||||
${CONDA_RUN} python -m pip install torch torchvision torchaudio
|
||||
${CONDA_RUN} python -m pip install accelerate --upgrade
|
||||
${CONDA_RUN} python -m pip install transformers --upgrade
|
||||
|
||||
- name: Environment
|
||||
shell: arch -arch arm64 bash {0}
|
||||
run: |
|
||||
${CONDA_RUN} python utils/print_env.py
|
||||
|
||||
- name: Run fast PyTorch tests on M1 (MPS)
|
||||
shell: arch -arch arm64 bash {0}
|
||||
env:
|
||||
HF_HOME: /System/Volumes/Data/mnt/cache
|
||||
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
|
||||
run: |
|
||||
${CONDA_RUN} python -m pytest -n 0 -s -v --make-reports=tests_torch_mps tests/
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
run: cat reports/tests_torch_mps_failures_short.txt
|
||||
|
||||
- name: Test suite reports artifacts
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: pr_torch_mps_test_reports
|
||||
path: reports
|
||||
@@ -125,14 +125,14 @@ Awesome! Tell us what problem it solved for you.
|
||||
|
||||
You can open a feature request [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=).
|
||||
|
||||
#### 2.3 Feedback.
|
||||
#### 2.3 Feedback.
|
||||
|
||||
Feedback about the library design and why it is good or not good helps the core maintainers immensely to build a user-friendly library. To understand the philosophy behind the current design philosophy, please have a look [here](https://huggingface.co/docs/diffusers/conceptual/philosophy). If you feel like a certain design choice does not fit with the current design philosophy, please explain why and how it should be changed. If a certain design choice follows the design philosophy too much, hence restricting use cases, explain why and how it should be changed.
|
||||
If a certain design choice is very useful for you, please also leave a note as this is great feedback for future design decisions.
|
||||
|
||||
You can open an issue about feedback [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=).
|
||||
|
||||
#### 2.4 Technical questions.
|
||||
#### 2.4 Technical questions.
|
||||
|
||||
Technical questions are mainly about why certain code of the library was written in a certain way, or what a certain part of the code does. Please make sure to link to the code in question and please provide detail on
|
||||
why this part of the code is difficult to understand.
|
||||
@@ -394,8 +394,8 @@ passes. You should run the tests impacted by your changes like this:
|
||||
```bash
|
||||
$ pytest tests/<TEST_TO_RUN>.py
|
||||
```
|
||||
|
||||
Before you run the tests, please make sure you install the dependencies required for testing. You can do so
|
||||
|
||||
Before you run the tests, please make sure you install the dependencies required for testing. You can do so
|
||||
with this command:
|
||||
|
||||
```bash
|
||||
|
||||
@@ -27,18 +27,18 @@ In a nutshell, Diffusers is built to be a natural extension of PyTorch. Therefor
|
||||
|
||||
## Simple over easy
|
||||
|
||||
As PyTorch states, **explicit is better than implicit** and **simple is better than complex**. This design philosophy is reflected in multiple parts of the library:
|
||||
As PyTorch states, **explicit is better than implicit** and **simple is better than complex**. This design philosophy is reflected in multiple parts of the library:
|
||||
- We follow PyTorch's API with methods like [`DiffusionPipeline.to`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.to) to let the user handle device management.
|
||||
- Raising concise error messages is preferred to silently correct erroneous input. Diffusers aims at teaching the user, rather than making the library as easy to use as possible.
|
||||
- Complex model vs. scheduler logic is exposed instead of magically handled inside. Schedulers/Samplers are separated from diffusion models with minimal dependencies on each other. This forces the user to write the unrolled denoising loop. However, the separation allows for easier debugging and gives the user more control over adapting the denoising process or switching out diffusion models or schedulers.
|
||||
- Separately trained components of the diffusion pipeline, *e.g.* the text encoder, the unet, and the variational autoencoder, each have their own model class. This forces the user to handle the interaction between the different model components, and the serialization format separates the model components into different files. However, this allows for easier debugging and customization. Dreambooth or textual inversion training
|
||||
- Separately trained components of the diffusion pipeline, *e.g.* the text encoder, the unet, and the variational autoencoder, each have their own model class. This forces the user to handle the interaction between the different model components, and the serialization format separates the model components into different files. However, this allows for easier debugging and customization. Dreambooth or textual inversion training
|
||||
is very simple thanks to diffusers' ability to separate single components of the diffusion pipeline.
|
||||
|
||||
## Tweakable, contributor-friendly over abstraction
|
||||
|
||||
For large parts of the library, Diffusers adopts an important design principle of the [Transformers library](https://github.com/huggingface/transformers), which is to prefer copy-pasted code over hasty abstractions. This design principle is very opinionated and stands in stark contrast to popular design principles such as [Don't repeat yourself (DRY)](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself).
|
||||
For large parts of the library, Diffusers adopts an important design principle of the [Transformers library](https://github.com/huggingface/transformers), which is to prefer copy-pasted code over hasty abstractions. This design principle is very opinionated and stands in stark contrast to popular design principles such as [Don't repeat yourself (DRY)](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself).
|
||||
In short, just like Transformers does for modeling files, diffusers prefers to keep an extremely low level of abstraction and very self-contained code for pipelines and schedulers.
|
||||
Functions, long code blocks, and even classes can be copied across multiple files which at first can look like a bad, sloppy design choice that makes the library unmaintainable.
|
||||
Functions, long code blocks, and even classes can be copied across multiple files which at first can look like a bad, sloppy design choice that makes the library unmaintainable.
|
||||
**However**, this design has proven to be extremely successful for Transformers and makes a lot of sense for community-driven, open-source machine learning libraries because:
|
||||
- Machine Learning is an extremely fast-moving field in which paradigms, model architectures, and algorithms are changing rapidly, which therefore makes it very difficult to define long-lasting code abstractions.
|
||||
- Machine Learning practitioners like to be able to quickly tweak existing code for ideation and research and therefore prefer self-contained code over one that contains many abstractions.
|
||||
@@ -47,10 +47,10 @@ Functions, long code blocks, and even classes can be copied across multiple file
|
||||
At Hugging Face, we call this design the **single-file policy** which means that almost all of the code of a certain class should be written in a single, self-contained file. To read more about the philosophy, you can have a look
|
||||
at [this blog post](https://huggingface.co/blog/transformers-design-philosophy).
|
||||
|
||||
In diffusers, we follow this philosophy for both pipelines and schedulers, but only partly for diffusion models. The reason we don't follow this design fully for diffusion models is because almost all diffusion pipelines, such
|
||||
In diffusers, we follow this philosophy for both pipelines and schedulers, but only partly for diffusion models. The reason we don't follow this design fully for diffusion models is because almost all diffusion pipelines, such
|
||||
as [DDPM](https://huggingface.co/docs/diffusers/v0.12.0/en/api/pipelines/ddpm), [Stable Diffusion](https://huggingface.co/docs/diffusers/v0.12.0/en/api/pipelines/stable_diffusion/overview#stable-diffusion-pipelines), [UnCLIP (Dalle-2)](https://huggingface.co/docs/diffusers/v0.12.0/en/api/pipelines/unclip#overview) and [Imagen](https://imagen.research.google/) all rely on the same diffusion model, the [UNet](https://huggingface.co/docs/diffusers/api/models#diffusers.UNet2DConditionModel).
|
||||
|
||||
Great, now you should have generally understood why 🧨 Diffusers is designed the way it is 🤗.
|
||||
Great, now you should have generally understood why 🧨 Diffusers is designed the way it is 🤗.
|
||||
We try to apply these design principles consistently across the library. Nevertheless, there are some minor exceptions to the philosophy or some unlucky design choices. If you have feedback regarding the design, we would ❤️ to hear it [directly on GitHub](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=).
|
||||
|
||||
## Design Philosophy in Details
|
||||
@@ -89,7 +89,7 @@ The following design principles are followed:
|
||||
- Models should by default have the highest precision and lowest performance setting.
|
||||
- To integrate new model checkpoints whose general architecture can be classified as an architecture that already exists in Diffusers, the existing model architecture shall be adapted to make it work with the new checkpoint. One should only create a new file if the model architecture is fundamentally different.
|
||||
- Models should be designed to be easily extendable to future changes. This can be achieved by limiting public function arguments, configuration arguments, and "foreseeing" future changes, *e.g.* it is usually better to add `string` "...type" arguments that can easily be extended to new future types instead of boolean `is_..._type` arguments. Only the minimum amount of changes shall be made to existing architectures to make a new model checkpoint work.
|
||||
- The model design is a difficult trade-off between keeping code readable and concise and supporting many model checkpoints. For most parts of the modeling code, classes shall be adapted for new model checkpoints, while there are some exceptions where it is preferred to add new classes to make sure the code is kept concise and
|
||||
- The model design is a difficult trade-off between keeping code readable and concise and supporting many model checkpoints. For most parts of the modeling code, classes shall be adapted for new model checkpoints, while there are some exceptions where it is preferred to add new classes to make sure the code is kept concise and
|
||||
readable longterm, such as [UNet blocks](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py) and [Attention processors](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
|
||||
|
||||
### Schedulers
|
||||
@@ -97,9 +97,9 @@ readable longterm, such as [UNet blocks](https://github.com/huggingface/diffuser
|
||||
Schedulers are responsible to guide the denoising process for inference as well as to define a noise schedule for training. They are designed as individual classes with loadable configuration files and strongly follow the **single-file policy**.
|
||||
|
||||
The following design principles are followed:
|
||||
- All schedulers are found in [`src/diffusers/schedulers`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers).
|
||||
- Schedulers are **not** allowed to import from large utils files and shall be kept very self-contained.
|
||||
- One scheduler python file corresponds to one scheduler algorithm (as might be defined in a paper).
|
||||
- All schedulers are found in [`src/diffusers/schedulers`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers).
|
||||
- Schedulers are **not** allowed to import from large utils files and shall be kept very self-contained.
|
||||
- One scheduler python file corresponds to one scheduler algorithm (as might be defined in a paper).
|
||||
- If schedulers share similar functionalities, we can make use of the `#Copied from` mechanism.
|
||||
- Schedulers all inherit from `SchedulerMixin` and `ConfigMixin`.
|
||||
- Schedulers can be easily swapped out with the [`ConfigMixin.from_config`](https://huggingface.co/docs/diffusers/main/en/api/configuration#diffusers.ConfigMixin.from_config) method as explained in detail [here](./using-diffusers/schedulers.mdx).
|
||||
|
||||
142
README.md
142
README.md
@@ -1,6 +1,6 @@
|
||||
<p align="center">
|
||||
<br>
|
||||
<img src="./docs/source/en/imgs/diffusers_library.jpg" width="400"/>
|
||||
<img src="https://github.com/huggingface/diffusers/blob/main/docs/source/en/imgs/diffusers_library.jpg" width="400"/>
|
||||
<br>
|
||||
<p>
|
||||
<p align="center">
|
||||
@@ -30,7 +30,7 @@ We recommend installing 🤗 Diffusers in a virtual environment from PyPi or Con
|
||||
### PyTorch
|
||||
|
||||
With `pip` (official package):
|
||||
|
||||
|
||||
```bash
|
||||
pip install --upgrade diffusers[torch]
|
||||
```
|
||||
@@ -59,8 +59,9 @@ Generating outputs is super easy with 🤗 Diffusers. To generate an image from
|
||||
|
||||
```python
|
||||
from diffusers import DiffusionPipeline
|
||||
import torch
|
||||
|
||||
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
|
||||
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
|
||||
pipeline.to("cuda")
|
||||
pipeline("An image of a squirrel in Picasso style").images[0]
|
||||
```
|
||||
@@ -99,58 +100,14 @@ Check out the [Quickstart](https://huggingface.co/docs/diffusers/quicktour) to l
|
||||
|
||||
| **Documentation** | **What can I learn?** |
|
||||
|---------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Tutorial | A basic crash course for learning how to use the library's most important features like using models and schedulers to build your own diffusion system, and training your own diffusion model. |
|
||||
| Loading | Guides for how to load and configure all the components (pipelines, models, and schedulers) of the library, as well as how to use different schedulers. |
|
||||
| Pipelines for inference | Guides for how to use pipelines for different inference tasks, batched generation, controlling generated outputs and randomness, and how to contribute a pipeline to the library. |
|
||||
| Optimization | Guides for how to optimize your diffusion model to run faster and consume less memory. |
|
||||
| [Tutorial](https://huggingface.co/docs/diffusers/tutorials/tutorial_overview) | A basic crash course for learning how to use the library's most important features like using models and schedulers to build your own diffusion system, and training your own diffusion model. |
|
||||
| [Loading](https://huggingface.co/docs/diffusers/using-diffusers/loading_overview) | Guides for how to load and configure all the components (pipelines, models, and schedulers) of the library, as well as how to use different schedulers. |
|
||||
| [Pipelines for inference](https://huggingface.co/docs/diffusers/using-diffusers/pipeline_overview) | Guides for how to use pipelines for different inference tasks, batched generation, controlling generated outputs and randomness, and how to contribute a pipeline to the library. |
|
||||
| [Optimization](https://huggingface.co/docs/diffusers/optimization/opt_overview) | Guides for how to optimize your diffusion model to run faster and consume less memory. |
|
||||
| [Training](https://huggingface.co/docs/diffusers/training/overview) | Guides for how to train a diffusion model for different tasks with different training techniques. |
|
||||
|
||||
## Supported pipelines
|
||||
|
||||
| Pipeline | Paper | Tasks |
|
||||
|---|---|:---:|
|
||||
| [alt_diffusion](./api/pipelines/alt_diffusion) | [**AltDiffusion**](https://arxiv.org/abs/2211.06679) | Image-to-Image Text-Guided Generation |
|
||||
| [audio_diffusion](./api/pipelines/audio_diffusion) | [**Audio Diffusion**](https://github.com/teticio/audio-diffusion.git) | Unconditional Audio Generation |
|
||||
| [controlnet](./api/pipelines/stable_diffusion/controlnet) | [**ControlNet with Stable Diffusion**](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation |
|
||||
| [cycle_diffusion](./api/pipelines/cycle_diffusion) | [**Cycle Diffusion**](https://arxiv.org/abs/2210.05559) | Image-to-Image Text-Guided Generation |
|
||||
| [dance_diffusion](./api/pipelines/dance_diffusion) | [**Dance Diffusion**](https://github.com/williamberman/diffusers.git) | Unconditional Audio Generation |
|
||||
| [ddpm](./api/pipelines/ddpm) | [**Denoising Diffusion Probabilistic Models**](https://arxiv.org/abs/2006.11239) | Unconditional Image Generation |
|
||||
| [ddim](./api/pipelines/ddim) | [**Denoising Diffusion Implicit Models**](https://arxiv.org/abs/2010.02502) | Unconditional Image Generation |
|
||||
| [latent_diffusion](./api/pipelines/latent_diffusion) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752)| Text-to-Image Generation |
|
||||
| [latent_diffusion](./api/pipelines/latent_diffusion) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752)| Super Resolution Image-to-Image |
|
||||
| [latent_diffusion_uncond](./api/pipelines/latent_diffusion_uncond) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752) | Unconditional Image Generation |
|
||||
| [paint_by_example](./api/pipelines/paint_by_example) | [**Paint by Example: Exemplar-based Image Editing with Diffusion Models**](https://arxiv.org/abs/2211.13227) | Image-Guided Image Inpainting |
|
||||
| [pndm](./api/pipelines/pndm) | [**Pseudo Numerical Methods for Diffusion Models on Manifolds**](https://arxiv.org/abs/2202.09778) | Unconditional Image Generation |
|
||||
| [score_sde_ve](./api/pipelines/score_sde_ve) | [**Score-Based Generative Modeling through Stochastic Differential Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
|
||||
| [score_sde_vp](./api/pipelines/score_sde_vp) | [**Score-Based Generative Modeling through Stochastic Differential Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
|
||||
| [semantic_stable_diffusion](./api/pipelines/semantic_stable_diffusion) | [**Semantic Guidance**](https://arxiv.org/abs/2301.12247) | Text-Guided Generation |
|
||||
| [stable_diffusion_text2img](./api/pipelines/stable_diffusion/text2img) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | Text-to-Image Generation |
|
||||
| [stable_diffusion_img2img](./api/pipelines/stable_diffusion/img2img) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | Image-to-Image Text-Guided Generation |
|
||||
| [stable_diffusion_inpaint](./api/pipelines/stable_diffusion/inpaint) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | Text-Guided Image Inpainting |
|
||||
| [stable_diffusion_panorama](./api/pipelines/stable_diffusion/panorama) | [**MultiDiffusion**](https://multidiffusion.github.io/) | Text-to-Panorama Generation |
|
||||
| [stable_diffusion_pix2pix](./api/pipelines/stable_diffusion/pix2pix) | [**InstructPix2Pix**](https://github.com/timothybrooks/instruct-pix2pix) | Text-Guided Image Editing|
|
||||
| [stable_diffusion_pix2pix_zero](./api/pipelines/stable_diffusion/pix2pix_zero) | [**Zero-shot Image-to-Image Translation**](https://pix2pixzero.github.io/) | Text-Guided Image Editing |
|
||||
| [stable_diffusion_attend_and_excite](./api/pipelines/stable_diffusion/attend_and_excite) | [**Attend and Excite for Stable Diffusion**](https://attendandexcite.github.io/Attend-and-Excite/) | Text-to-Image Generation |
|
||||
| [stable_diffusion_self_attention_guidance](./api/pipelines/stable_diffusion/self_attention_guidance) | [**Self-Attention Guidance**](https://ku-cvlab.github.io/Self-Attention-Guidance) | Text-to-Image Generation |
|
||||
| [stable_diffusion_image_variation](./stable_diffusion/image_variation) | [**Stable Diffusion Image Variations**](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) | Image-to-Image Generation |
|
||||
| [stable_diffusion_latent_upscale](./stable_diffusion/latent_upscale) | [**Stable Diffusion Latent Upscaler**](https://twitter.com/StabilityAI/status/1590531958815064065) | Text-Guided Super Resolution Image-to-Image |
|
||||
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Stable Diffusion 2**](https://stability.ai/blog/stable-diffusion-v2-release) | Text-to-Image Generation |
|
||||
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Stable Diffusion 2**](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Image Inpainting |
|
||||
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Depth-Conditional Stable Diffusion**](https://github.com/Stability-AI/stablediffusion#depth-conditional-stable-diffusion) | Depth-to-Image Generation |
|
||||
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Stable Diffusion 2**](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Super Resolution Image-to-Image |
|
||||
| [stable_diffusion_safe](./api/pipelines/stable_diffusion_safe) | [**Safe Stable Diffusion**](https://arxiv.org/abs/2211.05105) | Text-Guided Generation |
|
||||
| [stable_unclip](./stable_unclip) | **Stable unCLIP** | Text-to-Image Generation |
|
||||
| [stable_unclip](./stable_unclip) | **Stable unCLIP** | Image-to-Image Text-Guided Generation |
|
||||
| [stochastic_karras_ve](./api/pipelines/stochastic_karras_ve) | [**Elucidating the Design Space of Diffusion-Based Generative Models**](https://arxiv.org/abs/2206.00364) | Unconditional Image Generation |
|
||||
| [unclip](./api/pipelines/unclip) | [Hierarchical Text-Conditional Image Generation with CLIP Latents](https://arxiv.org/abs/2204.06125) | Text-to-Image Generation |
|
||||
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Text-to-Image Generation |
|
||||
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Image Variations Generation |
|
||||
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Dual Image and Text Guided Generation |
|
||||
| [vq_diffusion](./api/pipelines/vq_diffusion) | [Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://arxiv.org/abs/2111.14822) | Text-to-Image Generation |
|
||||
|
||||
## Contribution
|
||||
|
||||
We ❤️ contributions from the open-source community!
|
||||
We ❤️ contributions from the open-source community!
|
||||
If you want to contribute to this library, please check out our [Contribution guide](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md).
|
||||
You can look out for [issues](https://github.com/huggingface/diffusers/issues) you'd like to tackle to contribute to the library.
|
||||
- See [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) for general opportunities to contribute
|
||||
@@ -160,6 +117,87 @@ You can look out for [issues](https://github.com/huggingface/diffusers/issues) y
|
||||
Also, say 👋 in our public Discord channel <a href="https://discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white"></a>. We discuss the hottest trends about diffusion models, help each other with contributions, personal projects or
|
||||
just hang out ☕.
|
||||
|
||||
|
||||
## Popular Tasks & Pipelines
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th>Task</th>
|
||||
<th>Pipeline</th>
|
||||
<th>🤗 Hub</th>
|
||||
</tr>
|
||||
<tr style="border-top: 2px solid black">
|
||||
<td>Unconditional Image Generation</td>
|
||||
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/ddpm"> DDPM </a></td>
|
||||
<td><a href="https://huggingface.co/google/ddpm-ema-church-256"> google/ddpm-ema-church-256 </a></td>
|
||||
</tr>
|
||||
<tr style="border-top: 2px solid black">
|
||||
<td>Text-to-Image</td>
|
||||
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/text2img">Stable Diffusion Text-to-Image</a></td>
|
||||
<td><a href="https://huggingface.co/runwayml/stable-diffusion-v1-5"> runwayml/stable-diffusion-v1-5 </a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Text-to-Image</td>
|
||||
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/unclip">unclip</a></td>
|
||||
<td><a href="https://huggingface.co/kakaobrain/karlo-v1-alpha"> kakaobrain/karlo-v1-alpha </a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Text-to-Image</td>
|
||||
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/if">if</a></td>
|
||||
<td><a href="https://huggingface.co/DeepFloyd/IF-I-XL-v1.0"> DeepFloyd/IF-I-XL-v1.0 </a></td>
|
||||
</tr>
|
||||
<tr style="border-top: 2px solid black">
|
||||
<td>Text-guided Image-to-Image</td>
|
||||
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/controlnet">Controlnet</a></td>
|
||||
<td><a href="https://huggingface.co/lllyasviel/sd-controlnet-canny"> lllyasviel/sd-controlnet-canny </a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Text-guided Image-to-Image</td>
|
||||
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/pix2pix">Instruct Pix2Pix</a></td>
|
||||
<td><a href="https://huggingface.co/timbrooks/instruct-pix2pix"> timbrooks/instruct-pix2pix </a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Text-guided Image-to-Image</td>
|
||||
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/img2img">Stable Diffusion Image-to-Image</a></td>
|
||||
<td><a href="https://huggingface.co/runwayml/stable-diffusion-v1-5"> runwayml/stable-diffusion-v1-5 </a></td>
|
||||
</tr>
|
||||
<tr style="border-top: 2px solid black">
|
||||
<td>Text-guided Image Inpainting</td>
|
||||
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/inpaint">Stable Diffusion Inpaint</a></td>
|
||||
<td><a href="https://huggingface.co/runwayml/stable-diffusion-inpainting"> runwayml/stable-diffusion-inpainting </a></td>
|
||||
</tr>
|
||||
<tr style="border-top: 2px solid black">
|
||||
<td>Image Variation</td>
|
||||
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/image_variation">Stable Diffusion Image Variation</a></td>
|
||||
<td><a href="https://huggingface.co/lambdalabs/sd-image-variations-diffusers"> lambdalabs/sd-image-variations-diffusers </a></td>
|
||||
</tr>
|
||||
<tr style="border-top: 2px solid black">
|
||||
<td>Super Resolution</td>
|
||||
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/upscale">Stable Diffusion Upscale</a></td>
|
||||
<td><a href="https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler"> stabilityai/stable-diffusion-x4-upscaler </a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Super Resolution</td>
|
||||
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/latent_upscale">Stable Diffusion Latent Upscale</a></td>
|
||||
<td><a href="https://huggingface.co/stabilityai/sd-x2-latent-upscaler"> stabilityai/sd-x2-latent-upscaler </a></td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
## Popular libraries using 🧨 Diffusers
|
||||
|
||||
- https://github.com/microsoft/TaskMatrix
|
||||
- https://github.com/invoke-ai/InvokeAI
|
||||
- https://github.com/apple/ml-stable-diffusion
|
||||
- https://github.com/Sanster/lama-cleaner
|
||||
- https://github.com/IDEA-Research/Grounded-Segment-Anything
|
||||
- https://github.com/ashawkey/stable-dreamfusion
|
||||
- https://github.com/deep-floyd/IF
|
||||
- https://github.com/bentoml/BentoML
|
||||
- https://github.com/bmaltais/kohya_ss
|
||||
- +3000 other amazing GitHub repositories 💪
|
||||
|
||||
Thank you for using us ❤️
|
||||
|
||||
## Credits
|
||||
|
||||
This library concretizes previous work by many different authors and would not have been possible without their great research and implementations. We'd like to thank, in particular, the following implementations which have helped us in our development and without which the API could not have been as polished today:
|
||||
|
||||
@@ -26,7 +26,7 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
torch \
|
||||
torchvision \
|
||||
torchaudio \
|
||||
torchaudio && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
accelerate \
|
||||
datasets \
|
||||
@@ -37,6 +37,9 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip && \
|
||||
numpy \
|
||||
scipy \
|
||||
tensorboard \
|
||||
transformers
|
||||
transformers \
|
||||
omegaconf \
|
||||
pytorch-lightning \
|
||||
xformers
|
||||
|
||||
CMD ["/bin/bash"]
|
||||
|
||||
@@ -6,4 +6,4 @@ INSTALL_CONTENT = """
|
||||
# ! pip install git+https://github.com/huggingface/diffusers.git
|
||||
"""
|
||||
|
||||
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
|
||||
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
|
||||
|
||||
@@ -26,8 +26,10 @@
|
||||
title: Load and compare different schedulers
|
||||
- local: using-diffusers/custom_pipeline_overview
|
||||
title: Load community pipelines
|
||||
- local: using-diffusers/kerascv
|
||||
title: Load KerasCV Stable Diffusion checkpoints
|
||||
- local: using-diffusers/using_safetensors
|
||||
title: Load safetensors
|
||||
- local: using-diffusers/other-formats
|
||||
title: Load different Stable Diffusion formats
|
||||
title: Loading & Hub
|
||||
- sections:
|
||||
- local: using-diffusers/pipeline_overview
|
||||
@@ -42,6 +44,10 @@
|
||||
title: Text-guided image-inpainting
|
||||
- local: using-diffusers/depth2img
|
||||
title: Text-guided depth-to-image
|
||||
- local: using-diffusers/textual_inversion_inference
|
||||
title: Textual inversion
|
||||
- local: training/distributed_inference
|
||||
title: Distributed inference with multiple GPUs
|
||||
- local: using-diffusers/reusing_seeds
|
||||
title: Improve image quality with deterministic generation
|
||||
- local: using-diffusers/reproducibility
|
||||
@@ -50,8 +56,6 @@
|
||||
title: Community pipelines
|
||||
- local: using-diffusers/contribute_pipeline
|
||||
title: How to contribute a community pipeline
|
||||
- local: using-diffusers/using_safetensors
|
||||
title: Using safetensors
|
||||
- local: using-diffusers/stable_diffusion_jax_how_to
|
||||
title: Stable Diffusion in JAX/Flax
|
||||
- local: using-diffusers/weighted_prompts
|
||||
@@ -60,6 +64,10 @@
|
||||
- sections:
|
||||
- local: training/overview
|
||||
title: Overview
|
||||
- local: training/create_dataset
|
||||
title: Create a dataset for training
|
||||
- local: training/adapt_a_model
|
||||
title: Adapt a model to a new task
|
||||
- local: training/unconditional_training
|
||||
title: Unconditional image generation
|
||||
- local: training/text_inversion
|
||||
@@ -124,6 +132,8 @@
|
||||
- sections:
|
||||
- local: api/models
|
||||
title: Models
|
||||
- local: api/attnprocessor
|
||||
title: Attention Processor
|
||||
- local: api/diffusion_pipeline
|
||||
title: Diffusion Pipeline
|
||||
- local: api/logging
|
||||
@@ -134,16 +144,22 @@
|
||||
title: Outputs
|
||||
- local: api/loaders
|
||||
title: Loaders
|
||||
- local: api/utilities
|
||||
title: Utilities
|
||||
title: Main Classes
|
||||
- sections:
|
||||
- local: api/pipelines/overview
|
||||
title: Overview
|
||||
- local: api/pipelines/alt_diffusion
|
||||
title: AltDiffusion
|
||||
- local: api/pipelines/attend_and_excite
|
||||
title: Attend and Excite
|
||||
- local: api/pipelines/audio_diffusion
|
||||
title: Audio Diffusion
|
||||
- local: api/pipelines/audioldm
|
||||
title: AudioLDM
|
||||
- local: api/pipelines/controlnet
|
||||
title: ControlNet
|
||||
- local: api/pipelines/cycle_diffusion
|
||||
title: Cycle Diffusion
|
||||
- local: api/pipelines/dance_diffusion
|
||||
@@ -152,26 +168,36 @@
|
||||
title: DDIM
|
||||
- local: api/pipelines/ddpm
|
||||
title: DDPM
|
||||
- local: api/pipelines/diffedit
|
||||
title: DiffEdit
|
||||
- local: api/pipelines/dit
|
||||
title: DiT
|
||||
- local: api/pipelines/if
|
||||
title: IF
|
||||
- local: api/pipelines/pix2pix
|
||||
title: InstructPix2Pix
|
||||
- local: api/pipelines/kandinsky
|
||||
title: Kandinsky
|
||||
- local: api/pipelines/latent_diffusion
|
||||
title: Latent Diffusion
|
||||
- local: api/pipelines/panorama
|
||||
title: MultiDiffusion Panorama
|
||||
- local: api/pipelines/paint_by_example
|
||||
title: PaintByExample
|
||||
- local: api/pipelines/pix2pix_zero
|
||||
title: Pix2Pix Zero
|
||||
- local: api/pipelines/pndm
|
||||
title: PNDM
|
||||
- local: api/pipelines/repaint
|
||||
title: RePaint
|
||||
- local: api/pipelines/stable_diffusion_safe
|
||||
title: Safe Stable Diffusion
|
||||
- local: api/pipelines/score_sde_ve
|
||||
title: Score SDE VE
|
||||
- local: api/pipelines/self_attention_guidance
|
||||
title: Self-Attention Guidance
|
||||
- local: api/pipelines/semantic_stable_diffusion
|
||||
title: Semantic Guidance
|
||||
- local: api/pipelines/spectrogram_diffusion
|
||||
title: "Spectrogram Diffusion"
|
||||
title: Spectrogram Diffusion
|
||||
- sections:
|
||||
- local: api/pipelines/stable_diffusion/overview
|
||||
title: Overview
|
||||
@@ -185,31 +211,21 @@
|
||||
title: Depth-to-Image
|
||||
- local: api/pipelines/stable_diffusion/image_variation
|
||||
title: Image-Variation
|
||||
- local: api/pipelines/stable_diffusion/upscale
|
||||
title: Super-Resolution
|
||||
- local: api/pipelines/stable_diffusion/stable_diffusion_safe
|
||||
title: Safe Stable Diffusion
|
||||
- local: api/pipelines/stable_diffusion/stable_diffusion_2
|
||||
title: Stable Diffusion 2
|
||||
- local: api/pipelines/stable_diffusion/latent_upscale
|
||||
title: Stable-Diffusion-Latent-Upscaler
|
||||
- local: api/pipelines/stable_diffusion/pix2pix
|
||||
title: InstructPix2Pix
|
||||
- local: api/pipelines/stable_diffusion/attend_and_excite
|
||||
title: Attend and Excite
|
||||
- local: api/pipelines/stable_diffusion/pix2pix_zero
|
||||
title: Pix2Pix Zero
|
||||
- local: api/pipelines/stable_diffusion/self_attention_guidance
|
||||
title: Self-Attention Guidance
|
||||
- local: api/pipelines/stable_diffusion/panorama
|
||||
title: MultiDiffusion Panorama
|
||||
- local: api/pipelines/stable_diffusion/controlnet
|
||||
title: Text-to-Image Generation with ControlNet Conditioning
|
||||
- local: api/pipelines/stable_diffusion/model_editing
|
||||
title: Text-to-Image Model Editing
|
||||
- local: api/pipelines/stable_diffusion/upscale
|
||||
title: Super-Resolution
|
||||
title: Stable Diffusion
|
||||
- local: api/pipelines/stable_diffusion_2
|
||||
title: Stable Diffusion 2
|
||||
- local: api/pipelines/stable_unclip
|
||||
title: Stable unCLIP
|
||||
- local: api/pipelines/stochastic_karras_ve
|
||||
title: Stochastic Karras VE
|
||||
- local: api/pipelines/model_editing
|
||||
title: Text-to-Image Model Editing
|
||||
- local: api/pipelines/text_to_video
|
||||
title: Text-to-Video
|
||||
- local: api/pipelines/text_to_video_zero
|
||||
@@ -218,6 +234,8 @@
|
||||
title: UnCLIP
|
||||
- local: api/pipelines/latent_diffusion_uncond
|
||||
title: Unconditional Latent Diffusion
|
||||
- local: api/pipelines/unidiffuser
|
||||
title: UniDiffuser
|
||||
- local: api/pipelines/versatile_diffusion
|
||||
title: Versatile Diffusion
|
||||
- local: api/pipelines/vq_diffusion
|
||||
@@ -238,12 +256,16 @@
|
||||
title: DPM Discrete Scheduler
|
||||
- local: api/schedulers/dpm_discrete_ancestral
|
||||
title: DPM Discrete Scheduler with ancestral sampling
|
||||
- local: api/schedulers/dpm_sde
|
||||
title: DPMSolverSDEScheduler
|
||||
- local: api/schedulers/euler_ancestral
|
||||
title: Euler Ancestral Scheduler
|
||||
- local: api/schedulers/euler
|
||||
title: Euler scheduler
|
||||
- local: api/schedulers/heun
|
||||
title: Heun Scheduler
|
||||
- local: api/schedulers/multistep_dpm_solver_inverse
|
||||
title: Inverse Multistep DPM-Solver
|
||||
- local: api/schedulers/ipndm
|
||||
title: IPNDM
|
||||
- local: api/schedulers/lms_discrete
|
||||
|
||||
42
docs/source/en/api/attnprocessor.mdx
Normal file
42
docs/source/en/api/attnprocessor.mdx
Normal file
@@ -0,0 +1,42 @@
|
||||
# Attention Processor
|
||||
|
||||
An attention processor is a class for applying different types of attention mechanisms.
|
||||
|
||||
## AttnProcessor
|
||||
[[autodoc]] models.attention_processor.AttnProcessor
|
||||
|
||||
## AttnProcessor2_0
|
||||
[[autodoc]] models.attention_processor.AttnProcessor2_0
|
||||
|
||||
## LoRAAttnProcessor
|
||||
[[autodoc]] models.attention_processor.LoRAAttnProcessor
|
||||
|
||||
## LoRAAttnProcessor2_0
|
||||
[[autodoc]] models.attention_processor.LoRAAttnProcessor2_0
|
||||
|
||||
## CustomDiffusionAttnProcessor
|
||||
[[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor
|
||||
|
||||
## AttnAddedKVProcessor
|
||||
[[autodoc]] models.attention_processor.AttnAddedKVProcessor
|
||||
|
||||
## AttnAddedKVProcessor2_0
|
||||
[[autodoc]] models.attention_processor.AttnAddedKVProcessor2_0
|
||||
|
||||
## LoRAAttnAddedKVProcessor
|
||||
[[autodoc]] models.attention_processor.LoRAAttnAddedKVProcessor
|
||||
|
||||
## XFormersAttnProcessor
|
||||
[[autodoc]] models.attention_processor.XFormersAttnProcessor
|
||||
|
||||
## LoRAXFormersAttnProcessor
|
||||
[[autodoc]] models.attention_processor.LoRAXFormersAttnProcessor
|
||||
|
||||
## CustomDiffusionXFormersAttnProcessor
|
||||
[[autodoc]] models.attention_processor.CustomDiffusionXFormersAttnProcessor
|
||||
|
||||
## SlicedAttnProcessor
|
||||
[[autodoc]] models.attention_processor.SlicedAttnProcessor
|
||||
|
||||
## SlicedAttnAddedKVProcessor
|
||||
[[autodoc]] models.attention_processor.SlicedAttnAddedKVProcessor
|
||||
@@ -12,36 +12,25 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Pipelines
|
||||
|
||||
The [`DiffusionPipeline`] is the easiest way to load any pretrained diffusion pipeline from the [Hub](https://huggingface.co/models?library=diffusers) and to use it in inference.
|
||||
The [`DiffusionPipeline`] is the easiest way to load any pretrained diffusion pipeline from the [Hub](https://huggingface.co/models?library=diffusers) and use it for inference.
|
||||
|
||||
<Tip>
|
||||
|
||||
One should not use the Diffusion Pipeline class for training or fine-tuning a diffusion model. Individual
|
||||
components of diffusion pipelines are usually trained individually, so we suggest to directly work
|
||||
with [`UNetModel`] and [`UNetConditionModel`].
|
||||
You shouldn't use the [`DiffusionPipeline`] class for training or finetuning a diffusion model. Individual
|
||||
components (for example, [`UNetModel`] and [`UNetConditionModel`]) of diffusion pipelines are usually trained individually, so we suggest directly working with instead.
|
||||
|
||||
</Tip>
|
||||
|
||||
Any diffusion pipeline that is loaded with [`~DiffusionPipeline.from_pretrained`] will automatically
|
||||
detect the pipeline type, *e.g.* [`StableDiffusionPipeline`] and consequently load each component of the
|
||||
pipeline and pass them into the `__init__` function of the pipeline, *e.g.* [`~StableDiffusionPipeline.__init__`].
|
||||
The pipeline type (for example [`StableDiffusionPipeline`]) of any diffusion pipeline loaded with [`~DiffusionPipeline.from_pretrained`] is automatically
|
||||
detected and pipeline components are loaded and passed to the `__init__` function of the pipeline.
|
||||
|
||||
Any pipeline object can be saved locally with [`~DiffusionPipeline.save_pretrained`].
|
||||
|
||||
## DiffusionPipeline
|
||||
|
||||
[[autodoc]] DiffusionPipeline
|
||||
- all
|
||||
- __call__
|
||||
- device
|
||||
- to
|
||||
- components
|
||||
|
||||
## ImagePipelineOutput
|
||||
By default diffusion pipelines return an object of class
|
||||
|
||||
[[autodoc]] pipelines.ImagePipelineOutput
|
||||
|
||||
## AudioPipelineOutput
|
||||
By default diffusion pipelines return an object of class
|
||||
|
||||
[[autodoc]] pipelines.AudioPipelineOutput
|
||||
|
||||
@@ -61,7 +61,7 @@ verbose to the most verbose), those levels (with their corresponding int values
|
||||
critical errors.
|
||||
- `diffusers.logging.ERROR` (int value, 40): only report errors.
|
||||
- `diffusers.logging.WARNING` or `diffusers.logging.WARN` (int value, 30): only reports error and
|
||||
warnings. This the default level used by the library.
|
||||
warnings. This is the default level used by the library.
|
||||
- `diffusers.logging.INFO` (int value, 20): reports error, warnings and basic information.
|
||||
- `diffusers.logging.DEBUG` (int value, 10): report all information.
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.
|
||||
# Models
|
||||
|
||||
Diffusers contains pretrained models for popular algorithms and modules for creating the next set of diffusion models.
|
||||
The primary function of these models is to denoise an input sample, by modeling the distribution $p_\theta(\mathbf{x}_{t-1}|\mathbf{x}_t)$.
|
||||
The primary function of these models is to denoise an input sample, by modeling the distribution \\(p_{\theta}(x_{t-1}|x_{t})\\).
|
||||
The models are built on the base class ['ModelMixin'] that is a `torch.nn.module` with basic functionality for saving and loading models both locally and from the HuggingFace hub.
|
||||
|
||||
## ModelMixin
|
||||
|
||||
@@ -12,11 +12,11 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# BaseOutputs
|
||||
|
||||
All models have outputs that are instances of subclasses of [`~utils.BaseOutput`]. Those are
|
||||
data structures containing all the information returned by the model, but that can also be used as tuples or
|
||||
All models have outputs that are subclasses of [`~utils.BaseOutput`]. Those are
|
||||
data structures containing all the information returned by the model, but they can also be used as tuples or
|
||||
dictionaries.
|
||||
|
||||
Let's see how this looks in an example:
|
||||
For example:
|
||||
|
||||
```python
|
||||
from diffusers import DDIMPipeline
|
||||
@@ -25,31 +25,45 @@ pipeline = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
|
||||
outputs = pipeline()
|
||||
```
|
||||
|
||||
The `outputs` object is a [`~pipelines.ImagePipelineOutput`], as we can see in the
|
||||
documentation of that class below, it means it has an image attribute.
|
||||
The `outputs` object is a [`~pipelines.ImagePipelineOutput`], which means it has an `images` attribute.
|
||||
|
||||
You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get `None`:
|
||||
You can access each attribute as you normally would or with a keyword lookup, and if that attribute is not returned by the model, you will get `None`:
|
||||
|
||||
```python
|
||||
outputs.images
|
||||
```
|
||||
|
||||
or via keyword lookup
|
||||
|
||||
```python
|
||||
outputs["images"]
|
||||
```
|
||||
|
||||
When considering our `outputs` object as tuple, it only considers the attributes that don't have `None` values.
|
||||
Here for instance, we could retrieve images via indexing:
|
||||
When considering the `outputs` object as a tuple, it only considers the attributes that don't have `None` values.
|
||||
For instance, slicing it returns the tuple `(outputs.images,)`:
|
||||
|
||||
```python
|
||||
outputs[:1]
|
||||
```
|
||||
|
||||
which will return the tuple `(outputs.images)` for instance.
|
||||
<Tip>
|
||||
|
||||
To check a specific pipeline or model output, refer to its corresponding API documentation.
|
||||
|
||||
</Tip>
|
||||
|
||||
## BaseOutput
|
||||
|
||||
[[autodoc]] utils.BaseOutput
|
||||
- to_tuple
|
||||
|
||||
## ImagePipelineOutput
|
||||
|
||||
[[autodoc]] pipelines.ImagePipelineOutput
|
||||
|
||||
## FlaxImagePipelineOutput
|
||||
|
||||
[[autodoc]] pipelines.pipeline_flax_utils.FlaxImagePipelineOutput
|
||||
|
||||
## AudioPipelineOutput
|
||||
|
||||
[[autodoc]] pipelines.AudioPipelineOutput
|
||||
|
||||
## ImageTextPipelineOutput
|
||||
|
||||
[[autodoc]] ImageTextPipelineOutput
|
||||
@@ -22,7 +22,7 @@ The abstract of the paper is the following:
|
||||
|
||||
*We present a neural network structure, ControlNet, to control pretrained large diffusion models to support additional input conditions. The ControlNet learns task-specific conditions in an end-to-end way, and the learning is robust even when the training dataset is small (< 50k). Moreover, training a ControlNet is as fast as fine-tuning a diffusion model, and the model can be trained on a personal devices. Alternatively, if powerful computation clusters are available, the model can scale to large amounts (millions to billions) of data. We report that large diffusion models like Stable Diffusion can be augmented with ControlNets to enable conditional inputs like edge maps, segmentation maps, keypoints, etc. This may enrich the methods to control large diffusion models and further facilitate related applications.*
|
||||
|
||||
This model was contributed by the amazing community contributor [takuma104](https://huggingface.co/takuma104) ❤️ .
|
||||
This model was contributed by the community contributor [takuma104](https://huggingface.co/takuma104) ❤️ .
|
||||
|
||||
Resources:
|
||||
|
||||
@@ -33,7 +33,9 @@ Resources:
|
||||
|
||||
| Pipeline | Tasks | Demo
|
||||
|---|---|:---:|
|
||||
| [StableDiffusionControlNetPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_controlnet.py) | *Text-to-Image Generation with ControlNet Conditioning* | [Colab Example](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb)
|
||||
| [StableDiffusionControlNetPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/controlnet/pipeline_controlnet.py) | *Text-to-Image Generation with ControlNet Conditioning* | [Colab Example](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb)
|
||||
| [StableDiffusionControlNetImg2ImgPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py) | *Image-to-Image Generation with ControlNet Conditioning* |
|
||||
| [StableDiffusionControlNetInpaintPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_controlnet_inpaint.py) | *Inpainting Generation with ControlNet Conditioning* |
|
||||
|
||||
## Usage example
|
||||
|
||||
@@ -301,21 +303,22 @@ All checkpoints can be found under the authors' namespace [lllyasviel](https://h
|
||||
|
||||
### ControlNet v1.1
|
||||
|
||||
| Model Name | Control Image Overview| Control Image Example | Generated Image Example |
|
||||
|---|---|---|---|
|
||||
|[lllyasviel/control_v11p_sd15_canny](https://huggingface.co/lllyasviel/control_v11p_sd15_canny)<br/> *Trained with canny edge detection* | A monochrome image with white edges on a black background.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11e_sd15_ip2p](https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p)<br/> *Trained with pixel to pixel instruction* | No condition .|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_inpaint](https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint)<br/> Trained with image inpainting | No condition.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/output.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/output.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_mlsd](https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd)<br/> Trained with multi-level line segment detection | An image with annotated line segments.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11f1p_sd15_depth](https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth)<br/> Trained with depth estimation | An image with depth information, usually represented as a grayscale image.|<a href="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_normalbae](https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae)<br/> Trained with surface normal estimation | An image with surface normal information, usually represented as a color-coded image.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_seg](https://huggingface.co/lllyasviel/control_v11p_sd15_seg)<br/> Trained with image segmentation | An image with segmented regions, usually represented as a color-coded image.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_lineart](https://huggingface.co/lllyasviel/control_v11p_sd15_lineart)<br/> Trained with line art generation | An image with line art, usually black lines on a white background.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15s2_lineart_anime](https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime)<br/> Trained with anime line art generation | An image with anime-style line art.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_openpose](https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime)<br/> Trained with human pose estimation | An image with human poses, usually represented as a set of keypoints or skeletons.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_scribble](https://huggingface.co/lllyasviel/control_v11p_sd15_scribble)<br/> Trained with scribble-based image generation | An image with scribbles, usually random or user-drawn strokes.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_softedge](https://huggingface.co/lllyasviel/control_v11p_sd15_softedge)<br/> Trained with soft edge image generation | An image with soft edges, usually to create a more painterly or artistic effect.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11e_sd15_shuffle](https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle)<br/> Trained with image shuffling | An image with shuffled patches or regions.|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/image_out.png"/></a>|
|
||||
| Model Name | Control Image Overview| Condition Image | Control Image Example | Generated Image Example |
|
||||
|---|---|---|---|---|
|
||||
|[lllyasviel/control_v11p_sd15_canny](https://huggingface.co/lllyasviel/control_v11p_sd15_canny)<br/> | *Trained with canny edge detection* | A monochrome image with white edges on a black background.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11e_sd15_ip2p](https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p)<br/> | *Trained with pixel to pixel instruction* | No condition .|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_inpaint](https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint)<br/> | Trained with image inpainting | No condition.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/output.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/output.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_mlsd](https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd)<br/> | Trained with multi-level line segment detection | An image with annotated line segments.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11f1p_sd15_depth](https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth)<br/> | Trained with depth estimation | An image with depth information, usually represented as a grayscale image.|<a href="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_normalbae](https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae)<br/> | Trained with surface normal estimation | An image with surface normal information, usually represented as a color-coded image.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_seg](https://huggingface.co/lllyasviel/control_v11p_sd15_seg)<br/> | Trained with image segmentation | An image with segmented regions, usually represented as a color-coded image.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_lineart](https://huggingface.co/lllyasviel/control_v11p_sd15_lineart)<br/> | Trained with line art generation | An image with line art, usually black lines on a white background.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15s2_lineart_anime](https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime)<br/> | Trained with anime line art generation | An image with anime-style line art.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_openpose](https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime)<br/> | Trained with human pose estimation | An image with human poses, usually represented as a set of keypoints or skeletons.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_scribble](https://huggingface.co/lllyasviel/control_v11p_sd15_scribble)<br/> | Trained with scribble-based image generation | An image with scribbles, usually random or user-drawn strokes.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11p_sd15_softedge](https://huggingface.co/lllyasviel/control_v11p_sd15_softedge)<br/> | Trained with soft edge image generation | An image with soft edges, usually to create a more painterly or artistic effect.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11e_sd15_shuffle](https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle)<br/> | Trained with image shuffling | An image with shuffled patches or regions.|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/image_out.png"/></a>|
|
||||
|[lllyasviel/control_v11f1e_sd15_tile](https://huggingface.co/lllyasviel/control_v11f1e_sd15_tile)<br/> | Trained with image tiling | A blurry image or part of an image .|<a href="https://huggingface.co/lllyasviel/control_v11f1e_sd15_tile/resolve/main/images/original.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11f1e_sd15_tile/resolve/main/images/original.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11f1e_sd15_tile/resolve/main/images/output.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11f1e_sd15_tile/resolve/main/images/output.png"/></a>|
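
As a minimal sketch of plugging one of these v1.1 checkpoints into the pipeline (the `runwayml/stable-diffusion-v1-5` base model is just one common, assumed choice):

```py
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

# Load the canny ControlNet v1.1 checkpoint and attach it to a Stable Diffusion base model.
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)
```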
|
||||
|
||||
## StableDiffusionControlNetPipeline
|
||||
[[autodoc]] StableDiffusionControlNetPipeline
|
||||
@@ -329,6 +332,30 @@ All checkpoints can be found under the authors' namespace [lllyasviel](https://h
|
||||
- disable_xformers_memory_efficient_attention
|
||||
- load_textual_inversion
|
||||
|
||||
## StableDiffusionControlNetImg2ImgPipeline
|
||||
[[autodoc]] StableDiffusionControlNetImg2ImgPipeline
|
||||
- all
|
||||
- __call__
|
||||
- enable_attention_slicing
|
||||
- disable_attention_slicing
|
||||
- enable_vae_slicing
|
||||
- disable_vae_slicing
|
||||
- enable_xformers_memory_efficient_attention
|
||||
- disable_xformers_memory_efficient_attention
|
||||
- load_textual_inversion
|
||||
|
||||
## StableDiffusionControlNetInpaintPipeline
|
||||
[[autodoc]] StableDiffusionControlNetInpaintPipeline
|
||||
- all
|
||||
- __call__
|
||||
- enable_attention_slicing
|
||||
- disable_attention_slicing
|
||||
- enable_vae_slicing
|
||||
- disable_vae_slicing
|
||||
- enable_xformers_memory_efficient_attention
|
||||
- disable_xformers_memory_efficient_attention
|
||||
- load_textual_inversion
|
||||
|
||||
## FlaxStableDiffusionControlNetPipeline
|
||||
[[autodoc]] FlaxStableDiffusionControlNetPipeline
|
||||
- all
|
||||
360
docs/source/en/api/pipelines/diffedit.mdx
Normal file
@@ -0,0 +1,360 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Zero-shot Diffusion-based Semantic Image Editing with Mask Guidance
|
||||
|
||||
## Overview
|
||||
|
||||
[DiffEdit: Diffusion-based semantic image editing with mask guidance](https://arxiv.org/abs/2210.11427) by Guillaume Couairon, Jakob Verbeek, Holger Schwenk, and Matthieu Cord.
|
||||
|
||||
The abstract of the paper is the following:
|
||||
|
||||
*Image generation has recently seen tremendous advances, with diffusion models allowing to synthesize convincing images for a large variety of text prompts. In this article, we propose DiffEdit, a method to take advantage of text-conditioned diffusion models for the task of semantic image editing, where the goal is to edit an image based on a text query. Semantic image editing is an extension of image generation, with the additional constraint that the generated image should be as similar as possible to a given input image. Current editing methods based on diffusion models usually require to provide a mask, making the task much easier by treating it as a conditional inpainting task. In contrast, our main contribution is able to automatically generate a mask highlighting regions of the input image that need to be edited, by contrasting predictions of a diffusion model conditioned on different text prompts. Moreover, we rely on latent inference to preserve content in those regions of interest and show excellent synergies with mask-based diffusion. DiffEdit achieves state-of-the-art editing performance on ImageNet. In addition, we evaluate semantic image editing in more challenging settings, using images from the COCO dataset as well as text-based generated images.*
|
||||
|
||||
Resources:
|
||||
|
||||
* [Paper](https://arxiv.org/abs/2210.11427).
|
||||
* [Blog Post with Demo](https://blog.problemsolversguild.com/technical/research/2022/11/02/DiffEdit-Implementation.html).
|
||||
* [Implementation on Github](https://github.com/Xiang-cd/DiffEdit-stable-diffusion/).
|
||||
|
||||
## Tips
|
||||
|
||||
* The pipeline can generate masks that can be fed into other inpainting pipelines. Check out the code examples below to learn more.
|
||||
* In order to generate an image using this pipeline, both an image mask (manually specified or generated using `generate_mask`)
|
||||
and a set of partially inverted latents (generated using `invert`) _must_ be provided as arguments when calling the pipeline to generate the final edited image.
|
||||
Refer to the code examples below for more details.
|
||||
* The function `generate_mask` exposes two prompt arguments, `source_prompt` and `target_prompt`,
|
||||
that let you control the locations of the semantic edits in the final image to be generated. Let's say,
|
||||
you wanted to translate from "cat" to "dog". In this case, the edit direction will be "cat -> dog". To reflect
|
||||
this in the generated mask, you simply have to set the embeddings related to the phrases including "cat" to
|
||||
`source_prompt_embeds` and "dog" to `target_prompt_embeds`. Refer to the code example below for more details.
|
||||
* When generating partially inverted latents using `invert`, assign a caption or text embedding describing the
|
||||
overall image to the `prompt` argument to help guide the inverse latent sampling process. In most cases, the
|
||||
source concept is sufficiently descriptive to yield good results, but feel free to explore alternatives.
|
||||
Please refer to [this code example](#generating-image-captions-for-inversion) for more details.
|
||||
* When calling the pipeline to generate the final edited image, assign the source concept to `negative_prompt`
|
||||
and the target concept to `prompt`. Taking the above example, you simply have to set the embeddings related to
|
||||
the phrases including "cat" to `negative_prompt_embeds` and "dog" to `prompt_embeds`. Refer to the code example
|
||||
below for more details.
|
||||
* If you wanted to reverse the direction in the example above, i.e., "dog -> cat", then it's recommended to:
|
||||
* Swap the `source_prompt` and `target_prompt` in the arguments to `generate_mask`.
|
||||
* Change the input prompt for `invert` to include "dog".
|
||||
* Swap the `prompt` and `negative_prompt` in the arguments to call the pipeline to generate the final edited image.
|
||||
* Note that the source and target prompts, or their corresponding embeddings, can also be automatically generated. Please, refer to [this discussion](#generating-source-and-target-embeddings) for more details.
|
||||
|
||||
## Available Pipelines:
|
||||
|
||||
| Pipeline | Tasks
|
||||
|---|---|
|
||||
| [StableDiffusionDiffEditPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_diffedit.py) | *Text-Based Image Editing*
|
||||
|
||||
<!-- TODO: add Colab -->
|
||||
|
||||
## Usage example
|
||||
|
||||
### Based on an input image with a caption
|
||||
|
||||
When the pipeline is conditioned on an input image, we first obtain partially inverted latents from the input image using a
|
||||
`DDIMInverseScheduler` with the help of a caption. Then we generate an editing mask to identify relevant regions in the image using the source and target prompts. Finally,
|
||||
the inverted noise and generated mask are used to start the generation process.
|
||||
|
||||
First, let's load our pipeline:
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import DDIMScheduler, DDIMInverseScheduler, StableDiffusionDiffEditPipeline
|
||||
|
||||
sd_model_ckpt = "stabilityai/stable-diffusion-2-1"
|
||||
pipeline = StableDiffusionDiffEditPipeline.from_pretrained(
|
||||
sd_model_ckpt,
|
||||
torch_dtype=torch.float16,
|
||||
safety_checker=None,
|
||||
)
|
||||
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
|
||||
pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)
|
||||
pipeline.enable_model_cpu_offload()
|
||||
pipeline.enable_vae_slicing()
|
||||
generator = torch.manual_seed(0)
|
||||
```
|
||||
|
||||
Then, we load an input image to edit using our method:
|
||||
|
||||
```py
|
||||
from diffusers.utils import load_image
|
||||
|
||||
img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
|
||||
raw_image = load_image(img_url).convert("RGB").resize((768, 768))
|
||||
```
|
||||
|
||||
Then, we employ the source and target prompts to generate the editing mask:
|
||||
|
||||
```py
|
||||
# See the "Generating source and target embeddings" section below to
|
||||
# automate the generation of these captions with a pre-trained model like Flan-T5 as explained below.
|
||||
|
||||
source_prompt = "a bowl of fruits"
|
||||
target_prompt = "a basket of fruits"
|
||||
mask_image = pipeline.generate_mask(
|
||||
image=raw_image,
|
||||
source_prompt=source_prompt,
|
||||
target_prompt=target_prompt,
|
||||
generator=generator,
|
||||
)
|
||||
```
|
||||
|
||||
Then, we employ the caption and the input image to get the inverted latents:
|
||||
|
||||
```py
|
||||
inv_latents = pipeline.invert(prompt=source_prompt, image=raw_image, generator=generator).latents
|
||||
```
|
||||
|
||||
Now, generate the image with the inverted latents and semantically generated mask:
|
||||
|
||||
```py
|
||||
image = pipeline(
|
||||
prompt=target_prompt,
|
||||
mask_image=mask_image,
|
||||
image_latents=inv_latents,
|
||||
generator=generator,
|
||||
negative_prompt=source_prompt,
|
||||
).images[0]
|
||||
image.save("edited_image.png")
|
||||
```
|
||||
|
||||
## Generating image captions for inversion
|
||||
|
||||
The authors originally used the source concept prompt as the caption for generating the partially inverted latents. However, we can also leverage open source and public image captioning models for the same purpose.
|
||||
Below, we provide an end-to-end example with the [BLIP](https://huggingface.co/docs/transformers/model_doc/blip) model
|
||||
for generating captions.
|
||||
|
||||
First, let's load our automatic image captioning model:
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import BlipForConditionalGeneration, BlipProcessor
|
||||
|
||||
captioner_id = "Salesforce/blip-image-captioning-base"
|
||||
processor = BlipProcessor.from_pretrained(captioner_id)
|
||||
model = BlipForConditionalGeneration.from_pretrained(captioner_id, torch_dtype=torch.float16, low_cpu_mem_usage=True)
|
||||
```
|
||||
|
||||
Then, we define a utility to generate captions from an input image using the model:
|
||||
|
||||
```py
|
||||
@torch.no_grad()
|
||||
def generate_caption(images, caption_generator, caption_processor):
|
||||
text = "a photograph of"
|
||||
|
||||
inputs = caption_processor(images, text, return_tensors="pt").to(device="cuda", dtype=caption_generator.dtype)
|
||||
caption_generator.to("cuda")
|
||||
outputs = caption_generator.generate(**inputs, max_new_tokens=128)
|
||||
|
||||
# offload caption generator
|
||||
caption_generator.to("cpu")
|
||||
|
||||
caption = caption_processor.batch_decode(outputs, skip_special_tokens=True)[0]
|
||||
return caption
|
||||
```
|
||||
|
||||
Then, we load an input image for conditioning and obtain a suitable caption for it:
|
||||
|
||||
```py
|
||||
from diffusers.utils import load_image
|
||||
|
||||
img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
|
||||
raw_image = load_image(img_url).convert("RGB").resize((768, 768))
|
||||
caption = generate_caption(raw_image, model, processor)
|
||||
```
|
||||
|
||||
Then, we employ the generated caption and the input image to get the inverted latents:
|
||||
|
||||
```py
|
||||
from diffusers import DDIMInverseScheduler, DDIMScheduler
|
||||
|
||||
pipeline = StableDiffusionDiffEditPipeline.from_pretrained(
|
||||
"stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
|
||||
)
|
||||
pipeline = pipeline.to("cuda")
|
||||
pipeline.enable_model_cpu_offload()
|
||||
pipeline.enable_vae_slicing()
|
||||
|
||||
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
|
||||
pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)
|
||||
|
||||
generator = torch.manual_seed(0)
|
||||
inv_latents = pipeline.invert(prompt=caption, image=raw_image, generator=generator).latents
|
||||
```
|
||||
|
||||
Now, generate the image with the inverted latents and semantically generated mask from our source and target prompts:
|
||||
|
||||
```py
|
||||
source_prompt = "a bowl of fruits"
|
||||
target_prompt = "a basket of fruits"
|
||||
|
||||
mask_image = pipeline.generate_mask(
|
||||
image=raw_image,
|
||||
source_prompt=source_prompt,
|
||||
target_prompt=target_prompt,
|
||||
generator=generator,
|
||||
)
|
||||
|
||||
image = pipeline(
|
||||
prompt=target_prompt,
|
||||
mask_image=mask_image,
|
||||
image_latents=inv_latents,
|
||||
generator=generator,
|
||||
negative_prompt=source_prompt,
|
||||
).images[0]
|
||||
image.save("edited_image.png")
|
||||
```
|
||||
|
||||
## Generating source and target embeddings
|
||||
|
||||
The authors originally required the user to manually provide the source and target prompts for discovering
|
||||
edit directions. However, we can also leverage open source and public models for the same purpose.
|
||||
Below, we provide an end-to-end example with the [Flan-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5) model
|
||||
for generating source and target embeddings.
|
||||
|
||||
**1. Load the generation model**:
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import AutoTokenizer, T5ForConditionalGeneration
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xl")
|
||||
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl", device_map="auto", torch_dtype=torch.float16)
|
||||
```
|
||||
|
||||
**2. Construct a starting prompt**:
|
||||
|
||||
```py
|
||||
source_concept = "bowl"
|
||||
target_concept = "basket"
|
||||
|
||||
source_text = f"Provide a caption for images containing a {source_concept}. "
|
||||
"The captions should be in English and should be no longer than 150 characters."
|
||||
|
||||
target_text = f"Provide a caption for images containing a {target_concept}. "
|
||||
"The captions should be in English and should be no longer than 150 characters."
|
||||
```
|
||||
|
||||
Here, we're interested in the "bowl -> basket" direction.
|
||||
|
||||
**3. Generate prompts**:
|
||||
|
||||
We can use a utility like so for this purpose.
|
||||
|
||||
```py
|
||||
@torch.no_grad()
|
||||
def generate_prompts(input_prompt):
|
||||
input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids.to("cuda")
|
||||
|
||||
outputs = model.generate(
|
||||
input_ids, temperature=0.8, num_return_sequences=16, do_sample=True, max_new_tokens=128, top_k=10
|
||||
)
|
||||
return tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
```
|
||||
|
||||
And then we just call it to generate our prompts:
|
||||
|
||||
```py
|
||||
source_prompts = generate_prompts(source_text)
|
||||
target_prompts = generate_prompts(target_text)
|
||||
```
|
||||
|
||||
We encourage you to play around with the different parameters supported by the
|
||||
`generate()` method ([documentation](https://huggingface.co/docs/transformers/main/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin.generate)) for the generation quality you are looking for.
|
||||
|
||||
**4. Load the embedding model**:
|
||||
|
||||
Here, we need to use the same text encoder model used by the subsequent Stable Diffusion model.
|
||||
|
||||
```py
|
||||
from diffusers import StableDiffusionDiffEditPipeline
|
||||
|
||||
pipeline = StableDiffusionDiffEditPipeline.from_pretrained(
|
||||
"stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
|
||||
)
|
||||
pipeline = pipeline.to("cuda")
|
||||
pipeline.enable_model_cpu_offload()
|
||||
pipeline.enable_vae_slicing()
|
||||
|
||||
generator = torch.manual_seed(0)
|
||||
```
|
||||
|
||||
**5. Compute embeddings**:
|
||||
|
||||
```py
|
||||
import torch
|
||||
|
||||
@torch.no_grad()
|
||||
def embed_prompts(sentences, tokenizer, text_encoder, device="cuda"):
|
||||
embeddings = []
|
||||
for sent in sentences:
|
||||
text_inputs = tokenizer(
|
||||
sent,
|
||||
padding="max_length",
|
||||
max_length=tokenizer.model_max_length,
|
||||
truncation=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
text_input_ids = text_inputs.input_ids
|
||||
prompt_embeds = text_encoder(text_input_ids.to(device), attention_mask=None)[0]
|
||||
embeddings.append(prompt_embeds)
|
||||
return torch.concatenate(embeddings, dim=0).mean(dim=0).unsqueeze(0)
|
||||
|
||||
source_embeddings = embed_prompts(source_prompts, pipeline.tokenizer, pipeline.text_encoder)
|
||||
target_embeddings = embed_prompts(target_prompts, pipeline.tokenizer, pipeline.text_encoder)
|
||||
```
|
||||
|
||||
And you're done! Now, you can use these embeddings directly while calling the pipeline:
|
||||
|
||||
```py
|
||||
from diffusers import DDIMInverseScheduler, DDIMScheduler
|
||||
from diffusers.utils import load_image
|
||||
|
||||
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
|
||||
pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)
|
||||
|
||||
img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
|
||||
raw_image = load_image(img_url).convert("RGB").resize((768, 768))
|
||||
|
||||
|
||||
mask_image = pipeline.generate_mask(
|
||||
image=raw_image,
|
||||
    source_prompt_embeds=source_embeddings,
|
||||
    target_prompt_embeds=target_embeddings,
|
||||
generator=generator,
|
||||
)
|
||||
|
||||
inv_latents = pipeline.invert(
|
||||
    prompt_embeds=source_embeddings,
|
||||
image=raw_image,
|
||||
generator=generator,
|
||||
).latents
|
||||
|
||||
images = pipeline(
|
||||
mask_image=mask_image,
|
||||
image_latents=inv_latents,
|
||||
prompt_embeds=target_embeddings,
|
||||
negative_prompt_embeds=source_embeddings,
|
||||
generator=generator,
|
||||
).images
|
||||
images[0].save("edited_image.png")
|
||||
```
|
||||
|
||||
## StableDiffusionDiffEditPipeline
|
||||
[[autodoc]] StableDiffusionDiffEditPipeline
|
||||
- all
|
||||
- generate_mask
|
||||
- invert
|
||||
- __call__
|
||||
351
docs/source/en/api/pipelines/kandinsky.mdx
Normal file
@@ -0,0 +1,351 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Kandinsky
|
||||
|
||||
## Overview
|
||||
|
||||
Kandinsky 2.1 inherits best practices from [DALL-E 2](https://arxiv.org/abs/2204.06125) and [Latent Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/latent_diffusion), while introducing some new ideas.
|
||||
|
||||
It uses [CLIP](https://huggingface.co/docs/transformers/model_doc/clip) for encoding images and text, and a diffusion image prior (mapping) between latent spaces of CLIP modalities. This approach enhances the visual performance of the model and unveils new horizons in blending images and text-guided image manipulation.
|
||||
|
||||
The Kandinsky model is created by [Arseniy Shakhmatov](https://github.com/cene555), [Anton Razzhigaev](https://github.com/razzant), [Aleksandr Nikolich](https://github.com/AlexWortega), [Igor Pavlov](https://github.com/boomb0om), [Andrey Kuznetsov](https://github.com/kuznetsoffandrey) and [Denis Dimitrov](https://github.com/denndimitrov) and the original codebase can be found [here](https://github.com/ai-forever/Kandinsky-2)
|
||||
|
||||
## Available Pipelines:
|
||||
|
||||
| Pipeline | Tasks |
|
||||
|---|---|
|
||||
| [pipeline_kandinsky.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py) | *Text-to-Image Generation* |
|
||||
| [pipeline_kandinsky_inpaint.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py) | *Image-Guided Image Generation* |
|
||||
| [pipeline_kandinsky_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py) | *Image-Guided Image Generation* |
|
||||
|
||||
## Usage example
|
||||
|
||||
In the following, we will walk you through some examples of how to use the Kandinsky pipelines to create some visually aesthetic artwork.
|
||||
|
||||
### Text-to-Image Generation
|
||||
|
||||
For text-to-image generation, we need to use both [`KandinskyPriorPipeline`] and [`KandinskyPipeline`].
|
||||
The first step is to encode text prompts with CLIP and then diffuse the CLIP text embeddings to CLIP image embeddings,
|
||||
as first proposed in [DALL-E 2](https://cdn.openai.com/papers/dall-e-2.pdf).
|
||||
Let's throw a fun prompt at Kandinsky to see what it comes up with.
|
||||
|
||||
```py
|
||||
prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting"
|
||||
```
|
||||
|
||||
First, let's instantiate the prior pipeline and the text-to-image pipeline. Both
|
||||
pipelines are diffusion models.
|
||||
|
||||
|
||||
```py
|
||||
from diffusers import DiffusionPipeline
|
||||
import torch
|
||||
|
||||
pipe_prior = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
|
||||
pipe_prior.to("cuda")
|
||||
|
||||
t2i_pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
|
||||
t2i_pipe.to("cuda")
|
||||
```
|
||||
|
||||
Now we pass the prompt through the prior to generate image embeddings. The prior
|
||||
returns both the image embeddings corresponding to the prompt and negative/unconditional image
|
||||
embeddings corresponding to an empty string.
|
||||
|
||||
```py
|
||||
generator = torch.Generator(device="cuda").manual_seed(12)
|
||||
image_embeds, negative_image_embeds = pipe_prior(prompt, generator=generator).to_tuple()
|
||||
```
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
The text-to-image pipeline expects `image_embeds`, `negative_image_embeds`, and the original
|
||||
`prompt` as the text-to-image pipeline uses another text encoder to better guide the second diffusion
|
||||
process of `t2i_pipe`.
|
||||
|
||||
By default, the prior returns unconditioned negative image embeddings corresponding to the negative prompt of `""`.
|
||||
For better results, you can also pass a `negative_prompt` to the prior. This will increase the effective batch size
|
||||
of the prior by a factor of 2.
|
||||
|
||||
```py
|
||||
prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting"
|
||||
negative_prompt = "low quality, bad quality"
|
||||
|
||||
image_embeds, negative_image_embeds = pipe_prior(prompt, negative_prompt, generator=generator).to_tuple()
|
||||
```
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
Next, we can pass the embeddings as well as the prompt to the text-to-image pipeline. Remember that
|
||||
if you are using a customized negative prompt, you should also pass it to the text-to-image pipeline
|
||||
with `negative_prompt=negative_prompt`:
|
||||
|
||||
```py
|
||||
image = t2i_pipe(prompt, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds).images[0]
|
||||
image.save("cheeseburger_monster.png")
|
||||
```
|
||||
|
||||
One cheeseburger monster coming up! Enjoy!
|
||||
|
||||

|
||||
|
||||
The Kandinsky model works extremely well with creative prompts. Here is some of the amazing art that can be created using the exact same process but with different prompts.
|
||||
|
||||
```python
|
||||
prompt = "bird eye view shot of a full body woman with cyan light orange magenta makeup, digital art, long braided hair her face separated by makeup in the style of yin Yang surrealism, symmetrical face, real image, contrasting tone, pastel gradient background"
|
||||
```
|
||||

|
||||
|
||||
```python
|
||||
prompt = "A car exploding into colorful dust"
|
||||
```
|
||||

|
||||
|
||||
```python
|
||||
prompt = "editorial photography of an organic, almost liquid smoke style armchair"
|
||||
```
|
||||

|
||||
|
||||
```python
|
||||
prompt = "birds eye view of a quilted paper style alien planet landscape, vibrant colours, Cinematic lighting"
|
||||
```
|
||||

|
||||
|
||||
|
||||
### Text Guided Image-to-Image Generation
|
||||
|
||||
The same Kandinsky model weights can be used for text-guided image-to-image translation. In this case, just make sure to load the weights using the [`KandinskyImg2ImgPipeline`] pipeline.
|
||||
|
||||
**Note**: You can also directly move the weights of the text-to-image pipelines to the image-to-image pipelines
|
||||
without loading them twice by making use of the [`~DiffusionPipeline.components`] function as explained [here](#converting-between-different-pipelines).
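
A minimal sketch of that reuse (assuming, as is the case here, that both pipelines expect the same set of components):

```python
import torch
from diffusers import KandinskyImg2ImgPipeline, KandinskyPipeline

t2i_pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)

# Reuse the already-loaded weights instead of loading them a second time.
i2i_pipe = KandinskyImg2ImgPipeline(**t2i_pipe.components)
```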
|
||||
|
||||
Let's download an image.
|
||||
|
||||
```python
|
||||
from PIL import Image
|
||||
import requests
|
||||
from io import BytesIO
|
||||
|
||||
# download image
|
||||
url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
|
||||
response = requests.get(url)
|
||||
original_image = Image.open(BytesIO(response.content)).convert("RGB")
|
||||
original_image = original_image.resize((768, 512))
|
||||
```
|
||||
|
||||

|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
|
||||
|
||||
# create prior
|
||||
pipe_prior = KandinskyPriorPipeline.from_pretrained(
|
||||
"kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
|
||||
)
|
||||
pipe_prior.to("cuda")
|
||||
|
||||
# create img2img pipeline
|
||||
pipe = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt = "A fantasy landscape, Cinematic lighting"
|
||||
negative_prompt = "low quality, bad quality"
|
||||
|
||||
generator = torch.Generator(device="cuda").manual_seed(30)
|
||||
image_embeds, negative_image_embeds = pipe_prior(prompt, negative_prompt, generator=generator).to_tuple()
|
||||
|
||||
out = pipe(
|
||||
prompt,
|
||||
image=original_image,
|
||||
image_embeds=image_embeds,
|
||||
negative_image_embeds=negative_image_embeds,
|
||||
height=768,
|
||||
width=768,
|
||||
strength=0.3,
|
||||
)
|
||||
|
||||
out.images[0].save("fantasy_land.png")
|
||||
```
|
||||
|
||||

|
||||
|
||||
|
||||
### Text Guided Inpainting Generation
|
||||
|
||||
You can use [`KandinskyInpaintPipeline`] to edit images. In this example, we will add a hat to the portrait of a cat.
|
||||
|
||||
```py
|
||||
from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline
|
||||
from diffusers.utils import load_image
|
||||
import torch
|
||||
import numpy as np
|
||||
|
||||
pipe_prior = KandinskyPriorPipeline.from_pretrained(
|
||||
"kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
|
||||
)
|
||||
pipe_prior.to("cuda")
|
||||
|
||||
prompt = "a hat"
|
||||
prior_output = pipe_prior(prompt)
|
||||
|
||||
pipe = KandinskyInpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16)
|
||||
pipe.to("cuda")
|
||||
|
||||
init_image = load_image(
|
||||
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
|
||||
)
|
||||
|
||||
mask = np.ones((768, 768), dtype=np.float32)
|
||||
# Let's mask out an area above the cat's head
|
||||
mask[:250, 250:-250] = 0
|
||||
|
||||
out = pipe(
|
||||
prompt,
|
||||
image=init_image,
|
||||
mask_image=mask,
|
||||
**prior_output,
|
||||
height=768,
|
||||
width=768,
|
||||
num_inference_steps=150,
|
||||
)
|
||||
|
||||
image = out.images[0]
|
||||
image.save("cat_with_hat.png")
|
||||
```
|
||||

|
||||
|
||||
### Interpolate
|
||||
|
||||
The [`KandinskyPriorPipeline`] also comes with a cool utility function that will allow you to interpolate the latent space of different images and texts super easily. Here is an example of how you can create an Impressionist-style portrait for your pet based on "The Starry Night".
|
||||
|
||||
Note that you can interpolate between texts and images - in the below example, we passed a text prompt "a cat" and two images to the `interpolate` function, along with a `weights` variable containing the corresponding weights for each condition we interpolate.
|
||||
|
||||
```python
|
||||
from diffusers import KandinskyPriorPipeline, KandinskyPipeline
|
||||
from diffusers.utils import load_image
|
||||
import PIL
|
||||
|
||||
import torch
|
||||
|
||||
pipe_prior = KandinskyPriorPipeline.from_pretrained(
|
||||
"kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
|
||||
)
|
||||
pipe_prior.to("cuda")
|
||||
|
||||
img1 = load_image(
|
||||
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
|
||||
)
|
||||
|
||||
img2 = load_image(
|
||||
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/starry_night.jpeg"
|
||||
)
|
||||
|
||||
# add all the conditions we want to interpolate, can be either text or image
|
||||
images_texts = ["a cat", img1, img2]
|
||||
|
||||
# specify the weights for each condition in images_texts
|
||||
weights = [0.3, 0.3, 0.4]
|
||||
|
||||
# We can leave the prompt empty
|
||||
prompt = ""
|
||||
prior_out = pipe_prior.interpolate(images_texts, weights)
|
||||
|
||||
pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
|
||||
pipe.to("cuda")
|
||||
|
||||
image = pipe(prompt, **prior_out, height=768, width=768).images[0]
|
||||
|
||||
image.save("starry_cat.png")
|
||||
```
|
||||

|
||||
|
||||
|
||||
## Optimization
|
||||
|
||||
Running Kandinsky at inference requires running a first prior pipeline, [`KandinskyPriorPipeline`],
|
||||
and a second image decoding pipeline which is one of [`KandinskyPipeline`], [`KandinskyImg2ImgPipeline`], or [`KandinskyInpaintPipeline`].
|
||||
|
||||
The bulk of the computation time will always be the second image decoding pipeline, so when looking
|
||||
into optimizing the model, that is the part to focus on.
|
||||
|
||||
When running with PyTorch < 2.0, we strongly recommend making use of [`xformers`](https://github.com/facebookresearch/xformers)
|
||||
to speed up the attention computation. This can be done by simply running:
|
||||
|
||||
```py
|
||||
from diffusers import DiffusionPipeline
|
||||
import torch
|
||||
|
||||
t2i_pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
|
||||
t2i_pipe.enable_xformers_memory_efficient_attention()
|
||||
```
|
||||
|
||||
When running on PyTorch >= 2.0, PyTorch's SDPA attention will automatically be used. For more information on
|
||||
PyTorch's SDPA, feel free to have a look at [this blog post](https://pytorch.org/blog/accelerated-diffusers-pt-20/).
|
||||
|
||||
To have explicit control, you can also manually set the pipeline to use PyTorch 2.0's efficient attention:
|
||||
|
||||
```py
|
||||
from diffusers.models.attention_processor import AttnAddedKVProcessor2_0
|
||||
|
||||
t2i_pipe.unet.set_attn_processor(AttnAddedKVProcessor2_0())
|
||||
```
|
||||
|
||||
The slowest and most memory-intensive attention processor is the default `AttnAddedKVProcessor`.
|
||||
We do **not** recommend using it except for testing purposes or cases where fully deterministic behaviour is desired.
|
||||
You can set it with:
|
||||
|
||||
```py
|
||||
from diffusers.models.attention_processor import AttnAddedKVProcessor
|
||||
|
||||
t2i_pipe.unet.set_attn_processor(AttnAddedKVProcessor())
|
||||
```
|
||||
|
||||
With PyTorch >= 2.0, you can also use Kandinsky with `torch.compile` which depending
|
||||
on your hardware can significantly speed up your inference time once the model is compiled.
|
||||
To use Kandinsky with `torch.compile`, you can do:
|
||||
|
||||
```py
|
||||
t2i_pipe.unet.to(memory_format=torch.channels_last)
|
||||
t2i_pipe.unet = torch.compile(t2i_pipe.unet, mode="reduce-overhead", fullgraph=True)
|
||||
```
|
||||
|
||||
After compilation you should see a very fast inference time. For more information,
|
||||
feel free to have a look at [our PyTorch 2.0 benchmark](https://huggingface.co/docs/diffusers/main/en/optimization/torch2.0).
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## KandinskyPriorPipeline
|
||||
|
||||
[[autodoc]] KandinskyPriorPipeline
|
||||
- all
|
||||
- __call__
|
||||
- interpolate
|
||||
|
||||
## KandinskyPipeline
|
||||
|
||||
[[autodoc]] KandinskyPipeline
|
||||
- all
|
||||
- __call__
|
||||
|
||||
## KandinskyImg2ImgPipeline
|
||||
|
||||
[[autodoc]] KandinskyImg2ImgPipeline
|
||||
- all
|
||||
- __call__
|
||||
|
||||
## KandinskyInpaintPipeline
|
||||
|
||||
[[autodoc]] KandinskyInpaintPipeline
|
||||
- all
|
||||
- __call__
|
||||
@@ -46,7 +46,7 @@ available a colab notebook to directly try them out.
|
||||
|---|---|:---:|:---:|
|
||||
| [alt_diffusion](./alt_diffusion) | [**AltDiffusion**](https://arxiv.org/abs/2211.06679) | Image-to-Image Text-Guided Generation | -
|
||||
| [audio_diffusion](./audio_diffusion) | [**Audio Diffusion**](https://github.com/teticio/audio_diffusion.git) | Unconditional Audio Generation |
|
||||
| [controlnet](./api/pipelines/stable_diffusion/controlnet) | [**ControlNet with Stable Diffusion**](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb)
|
||||
| [controlnet](./api/pipelines/controlnet) | [**ControlNet with Stable Diffusion**](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb)
|
||||
| [cycle_diffusion](./cycle_diffusion) | [**Cycle Diffusion**](https://arxiv.org/abs/2210.05559) | Image-to-Image Text-Guided Generation |
|
||||
| [dance_diffusion](./dance_diffusion) | [**Dance Diffusion**](https://github.com/williamberman/diffusers.git) | Unconditional Audio Generation |
|
||||
| [ddpm](./ddpm) | [**Denoising Diffusion Probabilistic Models**](https://arxiv.org/abs/2006.11239) | Unconditional Image Generation |
|
||||
@@ -113,105 +113,3 @@ each pipeline, one should look directly into the respective pipeline.
|
||||
|
||||
**Note**: All pipelines have PyTorch's autograd disabled by decorating the `__call__` method with a [`torch.no_grad`](https://pytorch.org/docs/stable/generated/torch.no_grad.html) decorator because pipelines should
|
||||
not be used for training. If you want to store the gradients during the forward pass, we recommend writing your own pipeline, see also our [community-examples](https://github.com/huggingface/diffusers/tree/main/examples/community).
|
||||
|
||||
## Contribution
|
||||
|
||||
We are more than happy about any contribution to the officially supported pipelines 🤗. We aspire
for all of our pipelines to be **self-contained**, **easy-to-tweak**, **beginner-friendly** and **one-purpose-only**.
|
||||
|
||||
- **Self-contained**: A pipeline shall be as self-contained as possible. More specifically, this means that all functionality should be either directly defined in the pipeline file itself, should be inherited from (and only from) the [`DiffusionPipeline` class](.../diffusion_pipeline) or be directly attached to the model and scheduler components of the pipeline.
|
||||
- **Easy-to-use**: Pipelines should be extremely easy to use - one should be able to load the pipeline and
|
||||
use it for its designated task, *e.g.* text-to-image generation, in just a couple of lines of code. Most
|
||||
logic including pre-processing, an unrolled diffusion loop, and post-processing should all happen inside the `__call__` method.
|
||||
- **Easy-to-tweak**: Certain pipelines will not be able to handle all use cases and tasks that you might like them to. If you want to use a certain pipeline for a specific use case that is not yet supported, you might have to copy the pipeline file and tweak the code to your needs. We try to make the pipeline code as readable as possible so that each part –from pre-processing to diffusing to post-processing– can easily be adapted. If you would like the community to benefit from your customized pipeline, we would love to see a contribution to our [community-examples](https://github.com/huggingface/diffusers/tree/main/examples/community). If you feel that an important pipeline should be part of the official pipelines but isn't, a contribution to the [official pipelines](./overview) would be even better.
|
||||
- **One-purpose-only**: Pipelines should be used for one task and one task only. Even if two tasks are very similar from a modeling point of view, *e.g.* image2image translation and in-painting, pipelines shall be used for one task only to keep them *easy-to-tweak* and *readable*.
|
||||
|
||||
## Examples
|
||||
|
||||
### Text-to-Image generation with Stable Diffusion
|
||||
|
||||
```python
|
||||
# make sure you're logged in with `huggingface-cli login`
|
||||
from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
|
||||
pipe = pipe.to("cuda")
|
||||
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
image = pipe(prompt).images[0]
|
||||
|
||||
image.save("astronaut_rides_horse.png")
|
||||
```
|
||||
|
||||
### Image-to-Image text-guided generation with Stable Diffusion
|
||||
|
||||
The `StableDiffusionImg2ImgPipeline` lets you pass a text prompt and an initial image to condition the generation of new images.
|
||||
|
||||
```python
|
||||
import requests
import torch
from PIL import Image
from io import BytesIO

from diffusers import StableDiffusionImg2ImgPipeline
|
||||
|
||||
# load the pipeline
|
||||
device = "cuda"
|
||||
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to(
|
||||
device
|
||||
)
|
||||
|
||||
# let's download an initial image
|
||||
url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
|
||||
|
||||
response = requests.get(url)
|
||||
init_image = Image.open(BytesIO(response.content)).convert("RGB")
|
||||
init_image = init_image.resize((768, 512))
|
||||
|
||||
prompt = "A fantasy landscape, trending on artstation"
|
||||
|
||||
images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images
|
||||
|
||||
images[0].save("fantasy_landscape.png")
|
||||
```
|
||||
You can also run this example on colab [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb)
|
||||
|
||||
### Tweak prompts reusing seeds and latents
|
||||
|
||||
You can generate your own latents to reproduce results, or tweak your prompt on a specific result you liked. [This notebook](https://github.com/pcuenca/diffusers-examples/blob/main/notebooks/stable-diffusion-seeds.ipynb) shows how to do it step by step. You can also run it in Google Colab [](https://colab.research.google.com/github/pcuenca/diffusers-examples/blob/main/notebooks/stable-diffusion-seeds.ipynb)
|
||||
|
||||
|
||||
### In-painting using Stable Diffusion
|
||||
|
||||
The `StableDiffusionInpaintPipeline` lets you edit specific parts of an image by providing a mask and text prompt.
|
||||
|
||||
```python
|
||||
import PIL
|
||||
import requests
|
||||
import torch
|
||||
from io import BytesIO
|
||||
|
||||
from diffusers import StableDiffusionInpaintPipeline
|
||||
|
||||
|
||||
def download_image(url):
|
||||
response = requests.get(url)
|
||||
return PIL.Image.open(BytesIO(response.content)).convert("RGB")
|
||||
|
||||
|
||||
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
|
||||
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
|
||||
|
||||
init_image = download_image(img_url).resize((512, 512))
|
||||
mask_image = download_image(mask_url).resize((512, 512))
|
||||
|
||||
pipe = StableDiffusionInpaintPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-inpainting",
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
pipe = pipe.to("cuda")
|
||||
|
||||
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
|
||||
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
|
||||
```
|
||||
|
||||
You can also run this example on colab [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb)
|
||||
|
||||
@@ -52,6 +52,14 @@ image = pipe(prompt).images[0]
|
||||
image.save("dolomites.png")
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
When calling this pipeline, you can set `view_batch_size` to a value greater than 1.
On some high-performance GPUs, a higher `view_batch_size` can speed up generation
at the cost of increased VRAM usage.
|
||||
|
||||
</Tip>
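For example, a minimal sketch (reusing the `pipe` and `prompt` objects from the example above; the value `4` is just an illustration):

```py
# decode several panorama views per UNet forward pass; higher values use more VRAM
image = pipe(prompt, view_batch_size=4).images[0]
```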
|
||||
|
||||
## StableDiffusionPanoramaPipeline
|
||||
[[autodoc]] StableDiffusionPanoramaPipeline
|
||||
- __call__
|
||||
@@ -60,7 +60,7 @@ pipe = pipe.to("cuda")
|
||||
|
||||
generator = torch.Generator(device="cuda").manual_seed(0)
|
||||
output = pipe(
|
||||
original_image=original_image,
|
||||
image=original_image,
|
||||
mask_image=mask_image,
|
||||
num_inference_steps=250,
|
||||
eta=0.0,
|
||||
|
||||
@@ -36,6 +36,7 @@ For more details about how Stable Diffusion works and how it differs from the ba
|
||||
| [StableDiffusionAttendAndExcitePipeline](./attend_and_excite) | **Experimental** – *Text-to-Image Generation * | | [Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models](https://huggingface.co/spaces/AttendAndExcite/Attend-and-Excite)
|
||||
| [StableDiffusionPix2PixZeroPipeline](./pix2pix_zero) | **Experimental** – *Text-Based Image Editing * | | [Zero-shot Image-to-Image Translation](https://arxiv.org/abs/2302.03027)
|
||||
| [StableDiffusionModelEditingPipeline](./model_editing) | **Experimental** – *Text-to-Image Model Editing * | | [Editing Implicit Assumptions in Text-to-Image Diffusion Models](https://arxiv.org/abs/2303.08084)
|
||||
| [StableDiffusionDiffEditPipeline](./diffedit) | **Experimental** – *Text-Based Image Editing * | | [DiffEdit: Diffusion-based semantic image editing with mask guidance](https://arxiv.org/abs/2210.11427)
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -71,6 +71,64 @@ image = pipe(prompt, guidance_scale=9, num_inference_steps=25).images[0]
|
||||
image.save("astronaut.png")
|
||||
```
|
||||
|
||||
#### Experimental: "Common Diffusion Noise Schedules and Sample Steps are Flawed":
|
||||
|
||||
The paper **[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/abs/2305.08891)**
|
||||
claims that a mismatch between the training and inference settings leads to suboptimal inference generation results for Stable Diffusion.
|
||||
|
||||
The abstract reads as follows:
|
||||
|
||||
*We discover that common diffusion noise schedules do not enforce the last timestep to have zero signal-to-noise ratio (SNR),
|
||||
and some implementations of diffusion samplers do not start from the last timestep.
|
||||
Such designs are flawed and do not reflect the fact that the model is given pure Gaussian noise at inference, creating a discrepancy between training and inference.
|
||||
We show that the flawed design causes real problems in existing implementations.
|
||||
In Stable Diffusion, it severely limits the model to only generate images with medium brightness and
|
||||
prevents it from generating very bright and dark samples. We propose a few simple fixes:
|
||||
- (1) rescale the noise schedule to enforce zero terminal SNR;
|
||||
- (2) train the model with v prediction;
|
||||
- (3) change the sampler to always start from the last timestep;
|
||||
- (4) rescale classifier-free guidance to prevent over-exposure.
|
||||
These simple changes ensure the diffusion process is congruent between training and inference and
|
||||
allow the model to generate samples more faithful to the original data distribution.*
|
||||
|
||||
You can apply all of these changes in `diffusers` when using [`DDIMScheduler`]:
|
||||
- (1) rescale the noise schedule to enforce zero terminal SNR;
|
||||
```py
|
||||
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, rescale_betas_zero_snr=True)
|
||||
```
|
||||
- (2) train the model with v prediction;
|
||||
Continue fine-tuning a checkpoint with [`train_text_to_image.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) or [`train_text_to_image_lora.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py)
|
||||
and `--prediction_type="v_prediction"`.
|
||||
- (3) change the sampler to always start from the last timestep;
|
||||
```py
|
||||
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
|
||||
```
|
||||
- (4) rescale classifier-free guidance to prevent over-exposure.
|
||||
```py
|
||||
pipe(..., guidance_rescale=0.7)
|
||||
```
|
||||
|
||||
For example, you can use [this checkpoint](https://huggingface.co/ptx0/pseudo-journey-v2),
which has been fine-tuned with `"v_prediction"`.
|
||||
|
||||
The checkpoint can then be run in inference as follows:
|
||||
|
||||
```py
|
||||
import torch
from diffusers import DiffusionPipeline, DDIMScheduler

pipe = DiffusionPipeline.from_pretrained("ptx0/pseudo-journey-v2", torch_dtype=torch.float16)
pipe.scheduler = DDIMScheduler.from_config(
    pipe.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing"
)
pipe.to("cuda")

prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
image = pipe(prompt, guidance_rescale=0.7).images[0]
|
||||
```
|
||||
|
||||
## DDIMScheduler
|
||||
[[autodoc]] DDIMScheduler
|
||||
|
||||
### Image Inpainting
|
||||
|
||||
- *Image Inpainting (512x512 resolution)*: [stabilityai/stable-diffusion-2-inpainting](https://huggingface.co/stabilityai/stable-diffusion-2-inpainting) with [`StableDiffusionInpaintPipeline`]
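A minimal sketch of using this checkpoint (the example image and mask URLs are the same ones used in the in-painting example earlier in this document):

```py
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")

img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
init_image = load_image(img_url).resize((512, 512))
mask_image = load_image(mask_url).resize((512, 512))

prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
```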
|
||||
204
docs/source/en/api/pipelines/unidiffuser.mdx
Normal file
@@ -0,0 +1,204 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# UniDiffuser
|
||||
|
||||
The UniDiffuser model was proposed in [One Transformer Fits All Distributions in Multi-Modal Diffusion at Scale](https://arxiv.org/abs/2303.06555) by Fan Bao, Shen Nie, Kaiwen Xue, Chongxuan Li, Shi Pu, Yaole Wang, Gang Yue, Yue Cao, Hang Su, Jun Zhu.
|
||||
|
||||
The abstract of the [paper](https://arxiv.org/abs/2303.06555) is the following:
|
||||
|
||||
*This paper proposes a unified diffusion framework (dubbed UniDiffuser) to fit all distributions relevant to a set of multi-modal data in one model. Our key insight is -- learning diffusion models for marginal, conditional, and joint distributions can be unified as predicting the noise in the perturbed data, where the perturbation levels (i.e. timesteps) can be different for different modalities. Inspired by the unified view, UniDiffuser learns all distributions simultaneously with a minimal modification to the original diffusion model -- perturbs data in all modalities instead of a single modality, inputs individual timesteps in different modalities, and predicts the noise of all modalities instead of a single modality. UniDiffuser is parameterized by a transformer for diffusion models to handle input types of different modalities. Implemented on large-scale paired image-text data, UniDiffuser is able to perform image, text, text-to-image, image-to-text, and image-text pair generation by setting proper timesteps without additional overhead. In particular, UniDiffuser is able to produce perceptually realistic samples in all tasks and its quantitative results (e.g., the FID and CLIP score) are not only superior to existing general-purpose models but also comparable to the bespoken models (e.g., Stable Diffusion and DALL-E 2) in representative tasks (e.g., text-to-image generation).*
|
||||
|
||||
Resources:
|
||||
|
||||
* [Paper](https://arxiv.org/abs/2303.06555).
|
||||
* [Original Code](https://github.com/thu-ml/unidiffuser).
|
||||
|
||||
Available Checkpoints are:
|
||||
- *UniDiffuser-v0 (512x512 resolution)* [thu-ml/unidiffuser-v0](https://huggingface.co/thu-ml/unidiffuser-v0)
|
||||
- *UniDiffuser-v1 (512x512 resolution)* [thu-ml/unidiffuser-v1](https://huggingface.co/thu-ml/unidiffuser-v1)
|
||||
|
||||
This pipeline was contributed by our community member [dg845](https://github.com/dg845).
|
||||
|
||||
## Available Pipelines:
|
||||
|
||||
| Pipeline | Tasks | Demo | Colab |
|
||||
|:---:|:---:|:---:|:---:|
|
||||
| [UniDiffuserPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_unidiffuser.py) | *Joint Image-Text Gen*, *Text-to-Image*, *Image-to-Text*,<br> *Image Gen*, *Text Gen*, *Image Variation*, *Text Variation* | [🤗 Spaces](https://huggingface.co/spaces/thu-ml/unidiffuser) | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/unidiffuser.ipynb) |
|
||||
|
||||
## Usage Examples
|
||||
|
||||
Because the UniDiffuser model is trained to model the joint distribution of (image, text) pairs, it is capable of performing a diverse range of generation tasks.
|
||||
|
||||
### Unconditional Image and Text Generation
|
||||
|
||||
Unconditional generation (where we start from only latents sampled from a standard Gaussian prior) from a [`UniDiffuserPipeline`] will produce an (image, text) pair:
|
||||
|
||||
```python
|
||||
import torch
|
||||
|
||||
from diffusers import UniDiffuserPipeline
|
||||
|
||||
device = "cuda"
|
||||
model_id_or_path = "thu-ml/unidiffuser-v1"
|
||||
pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
|
||||
pipe.to(device)
|
||||
|
||||
# Unconditional image and text generation. The generation task is automatically inferred.
|
||||
sample = pipe(num_inference_steps=20, guidance_scale=8.0)
|
||||
image = sample.images[0]
|
||||
text = sample.text[0]
|
||||
image.save("unidiffuser_joint_sample_image.png")
|
||||
print(text)
|
||||
```
|
||||
|
||||
This is also called "joint" generation in the UniDiffusers paper, since we are sampling from the joint image-text distribution.
|
||||
|
||||
Note that the generation task is inferred from the inputs used when calling the pipeline.
It is also possible to specify the unconditional generation task ("mode") manually with [`UniDiffuserPipeline.set_joint_mode`]:
|
||||
|
||||
```python
|
||||
# Equivalent to the above.
|
||||
pipe.set_joint_mode()
|
||||
sample = pipe(num_inference_steps=20, guidance_scale=8.0)
|
||||
```
|
||||
|
||||
When the mode is set manually, subsequent calls to the pipeline will use the set mode without attempting to infer the mode.
|
||||
You can reset the mode with [`UniDiffuserPipeline.reset_mode`], after which the pipeline will once again infer the mode.
|
||||
|
||||
You can also generate only an image or only text (which the UniDiffuser paper calls "marginal" generation since we sample from the marginal distribution of images and text, respectively):
|
||||
|
||||
```python
|
||||
# Unlike other generation tasks, image-only and text-only generation don't use classifier-free guidance
|
||||
# Image-only generation
|
||||
pipe.set_image_mode()
|
||||
sample_image = pipe(num_inference_steps=20).images[0]
|
||||
# Text-only generation
|
||||
pipe.set_text_mode()
|
||||
sample_text = pipe(num_inference_steps=20).text[0]
|
||||
```
|
||||
|
||||
### Text-to-Image Generation
|
||||
|
||||
UniDiffuser is also capable of sampling from conditional distributions; that is, the distribution of images conditioned on a text prompt or the distribution of texts conditioned on an image.
|
||||
Here is an example of sampling from the conditional image distribution (text-to-image generation or text-conditioned image generation):
|
||||
|
||||
```python
|
||||
import torch
|
||||
|
||||
from diffusers import UniDiffuserPipeline
|
||||
|
||||
device = "cuda"
|
||||
model_id_or_path = "thu-ml/unidiffuser-v1"
|
||||
pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
|
||||
pipe.to(device)
|
||||
|
||||
# Text-to-image generation
|
||||
prompt = "an elephant under the sea"
|
||||
|
||||
sample = pipe(prompt=prompt, num_inference_steps=20, guidance_scale=8.0)
|
||||
t2i_image = sample.images[0]
|
||||
t2i_image.save("unidiffuser_text2img_sample_image.png")
|
||||
```
|
||||
|
||||
The `text2img` mode requires that either an input `prompt` or `prompt_embeds` be supplied. You can set the `text2img` mode manually with [`UniDiffuserPipeline.set_text_to_image_mode`].
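For example, a minimal sketch of setting the mode explicitly before the call (reusing the objects defined above):

```python
# equivalent to only passing `prompt` in the example above
pipe.set_text_to_image_mode()
sample = pipe(prompt=prompt, num_inference_steps=20, guidance_scale=8.0)
t2i_image = sample.images[0]
```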
|
||||
|
||||
### Image-to-Text Generation
|
||||
|
||||
Similarly, UniDiffuser can also produce text samples given an image (image-to-text or image-conditioned text generation):
|
||||
|
||||
```python
|
||||
import torch
|
||||
|
||||
from diffusers import UniDiffuserPipeline
|
||||
from diffusers.utils import load_image
|
||||
|
||||
device = "cuda"
|
||||
model_id_or_path = "thu-ml/unidiffuser-v1"
|
||||
pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
|
||||
pipe.to(device)
|
||||
|
||||
# Image-to-text generation
|
||||
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
|
||||
init_image = load_image(image_url).resize((512, 512))
|
||||
|
||||
sample = pipe(image=init_image, num_inference_steps=20, guidance_scale=8.0)
|
||||
i2t_text = sample.text[0]
|
||||
print(i2t_text)
|
||||
```
|
||||
|
||||
The `img2text` mode requires that an input `image` be supplied. You can set the `img2text` mode manually with [`UniDiffuserPipeline.set_image_to_text_mode`].
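Likewise, a minimal sketch of setting the mode explicitly (reusing the objects defined above):

```python
# equivalent to only passing `image` in the example above
pipe.set_image_to_text_mode()
sample = pipe(image=init_image, num_inference_steps=20, guidance_scale=8.0)
i2t_text = sample.text[0]
```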
|
||||
|
||||
### Image Variation
|
||||
|
||||
The UniDiffuser authors suggest performing image variation through a "round-trip" generation method: given an input image, we first perform an image-to-text generation, and then perform a text-to-image generation on the output of the first generation.
This produces a new image which is semantically similar to the input image:
|
||||
|
||||
```python
|
||||
import torch
|
||||
|
||||
from diffusers import UniDiffuserPipeline
|
||||
from diffusers.utils import load_image
|
||||
|
||||
device = "cuda"
|
||||
model_id_or_path = "thu-ml/unidiffuser-v1"
|
||||
pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
|
||||
pipe.to(device)
|
||||
|
||||
# Image variation can be performed with an image-to-text generation followed by a text-to-image generation:
|
||||
# 1. Image-to-text generation
|
||||
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
|
||||
init_image = load_image(image_url).resize((512, 512))
|
||||
|
||||
sample = pipe(image=init_image, num_inference_steps=20, guidance_scale=8.0)
|
||||
i2t_text = sample.text[0]
|
||||
print(i2t_text)
|
||||
|
||||
# 2. Text-to-image generation
|
||||
sample = pipe(prompt=i2t_text, num_inference_steps=20, guidance_scale=8.0)
|
||||
final_image = sample.images[0]
|
||||
final_image.save("unidiffuser_image_variation_sample.png")
|
||||
```
|
||||
|
||||
### Text Variation
|
||||
|
||||
|
||||
Similarly, text variation can be performed on an input prompt with a text-to-image generation followed by an image-to-text generation:
|
||||
|
||||
```python
|
||||
import torch
|
||||
|
||||
from diffusers import UniDiffuserPipeline
|
||||
|
||||
device = "cuda"
|
||||
model_id_or_path = "thu-ml/unidiffuser-v1"
|
||||
pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
|
||||
pipe.to(device)
|
||||
|
||||
# Text variation can be performed with a text-to-image generation followed by an image-to-text generation:
|
||||
# 1. Text-to-image generation
|
||||
prompt = "an elephant under the sea"
|
||||
|
||||
sample = pipe(prompt=prompt, num_inference_steps=20, guidance_scale=8.0)
|
||||
t2i_image = sample.images[0]
|
||||
t2i_image.save("unidiffuser_text2img_sample_image.png")
|
||||
|
||||
# 2. Image-to-text generation
|
||||
sample = pipe(image=t2i_image, num_inference_steps=20, guidance_scale=8.0)
|
||||
final_prompt = sample.text[0]
|
||||
print(final_prompt)
|
||||
```
|
||||
|
||||
## UniDiffuserPipeline
|
||||
[[autodoc]] UniDiffuserPipeline
|
||||
- all
|
||||
- __call__
|
||||
@@ -18,10 +18,71 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
The abstract of the paper is the following:
|
||||
|
||||
Denoising diffusion probabilistic models (DDPMs) have achieved high quality image generation without adversarial training, yet they require simulating a Markov chain for many steps to produce a sample. To accelerate sampling, we present denoising diffusion implicit models (DDIMs), a more efficient class of iterative implicit probabilistic models with the same training procedure as DDPMs. In DDPMs, the generative process is defined as the reverse of a Markovian diffusion process. We construct a class of non-Markovian diffusion processes that lead to the same training objective, but whose reverse process can be much faster to sample from. We empirically demonstrate that DDIMs can produce high quality samples 10× to 50× faster in terms of wall-clock time compared to DDPMs, allow us to trade off computation for sample quality, and can perform semantically meaningful image interpolation directly in the latent space.
|
||||
*Denoising diffusion probabilistic models (DDPMs) have achieved high quality image generation without adversarial training,
|
||||
yet they require simulating a Markov chain for many steps to produce a sample.
|
||||
To accelerate sampling, we present denoising diffusion implicit models (DDIMs), a more efficient class of iterative implicit probabilistic models
|
||||
with the same training procedure as DDPMs. In DDPMs, the generative process is defined as the reverse of a Markovian diffusion process.
|
||||
We construct a class of non-Markovian diffusion processes that lead to the same training objective, but whose reverse process can be much faster to sample from.
|
||||
We empirically demonstrate that DDIMs can produce high quality samples 10× to 50× faster in terms of wall-clock time compared to DDPMs, allow us to trade off
|
||||
computation for sample quality, and can perform semantically meaningful image interpolation directly in the latent space.*
|
||||
|
||||
The original codebase of this paper can be found here: [ermongroup/ddim](https://github.com/ermongroup/ddim).
|
||||
For questions, feel free to contact the author on [tsong.me](https://tsong.me/).
|
||||
|
||||
### Experimental: "Common Diffusion Noise Schedules and Sample Steps are Flawed":
|
||||
|
||||
The paper **[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/abs/2305.08891)**
|
||||
claims that a mismatch between the training and inference settings leads to suboptimal inference generation results for Stable Diffusion.
|
||||
|
||||
The abstract reads as follows:
|
||||
|
||||
*We discover that common diffusion noise schedules do not enforce the last timestep to have zero signal-to-noise ratio (SNR),
|
||||
and some implementations of diffusion samplers do not start from the last timestep.
|
||||
Such designs are flawed and do not reflect the fact that the model is given pure Gaussian noise at inference, creating a discrepancy between training and inference.
|
||||
We show that the flawed design causes real problems in existing implementations.
|
||||
In Stable Diffusion, it severely limits the model to only generate images with medium brightness and
|
||||
prevents it from generating very bright and dark samples. We propose a few simple fixes:
|
||||
- (1) rescale the noise schedule to enforce zero terminal SNR;
|
||||
- (2) train the model with v prediction;
|
||||
- (3) change the sampler to always start from the last timestep;
|
||||
- (4) rescale classifier-free guidance to prevent over-exposure.
|
||||
These simple changes ensure the diffusion process is congruent between training and inference and
|
||||
allow the model to generate samples more faithful to the original data distribution.*
|
||||
|
||||
You can apply all of these changes in `diffusers` when using [`DDIMScheduler`]:
|
||||
- (1) rescale the noise schedule to enforce zero terminal SNR;
|
||||
```py
|
||||
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, rescale_betas_zero_snr=True)
|
||||
```
|
||||
- (2) train the model with v prediction;
|
||||
Continue fine-tuning a checkpoint with [`train_text_to_image.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) or [`train_text_to_image_lora.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py)
|
||||
and `--prediction_type="v_prediction"`.
|
||||
- (3) change the sampler to always start from the last timestep;
|
||||
```py
|
||||
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
|
||||
```
|
||||
- (4) rescale classifier-free guidance to prevent over-exposure.
|
||||
```py
|
||||
pipe(..., guidance_rescale=0.7)
|
||||
```
|
||||
|
||||
For example, you can use [this checkpoint](https://huggingface.co/ptx0/pseudo-journey-v2),
which has been fine-tuned with `"v_prediction"`.
|
||||
|
||||
The checkpoint can then be run in inference as follows:
|
||||
|
||||
```py
|
||||
import torch
from diffusers import DiffusionPipeline, DDIMScheduler

pipe = DiffusionPipeline.from_pretrained("ptx0/pseudo-journey-v2", torch_dtype=torch.float16)
pipe.scheduler = DDIMScheduler.from_config(
    pipe.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing"
)
pipe.to("cuda")

prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
image = pipe(prompt, guidance_rescale=0.7).images[0]
|
||||
```
|
||||
|
||||
## DDIMScheduler
|
||||
[[autodoc]] DDIMScheduler
|
||||
|
||||
23
docs/source/en/api/schedulers/dpm_sde.mdx
Normal file
@@ -0,0 +1,23 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# DPM Stochastic Scheduler inspired by the Karras et al. paper
|
||||
|
||||
## Overview
|
||||
|
||||
Inspired by the stochastic sampler from [Karras et al.](https://arxiv.org/abs/2206.00364).
The scheduler is ported from [@crowsonkb](https://github.com/crowsonkb)'s [k-diffusion](https://github.com/crowsonkb/k-diffusion) library.

All credit for making this scheduler work goes to [Katherine Crowson](https://github.com/crowsonkb/).
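As a minimal usage sketch (assuming the scheduler can be swapped into an existing Stable Diffusion pipeline via `from_config`, as with other schedulers):

```py
import torch
from diffusers import StableDiffusionPipeline, DPMSolverSDEScheduler

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
# swap the default scheduler for the stochastic DPM SDE scheduler
pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)

image = pipe("an astronaut riding a horse on mars", num_inference_steps=25).images[0]
```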
|
||||
|
||||
## DPMSolverSDEScheduler
|
||||
[[autodoc]] DPMSolverSDEScheduler
|
||||
@@ -0,0 +1,22 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Inverse Multistep DPM-Solver (DPMSolverMultistepInverse)
|
||||
|
||||
## Overview
|
||||
|
||||
This scheduler is the inverted scheduler of [DPM-Solver: A Fast ODE Solver for Diffusion Probabilistic Model Sampling in Around 10 Steps](https://arxiv.org/abs/2206.00927) and [DPM-Solver++: Fast Solver for Guided Sampling of Diffusion Probabilistic Models](https://arxiv.org/abs/2211.01095) by Cheng Lu, Yuhao Zhou, Fan Bao, Jianfei Chen, Chongxuan Li, and Jun Zhu.
|
||||
The implementation is mostly based on the DDIM inversion definition of [Null-text Inversion for Editing Real Images using Guided Diffusion Models](https://arxiv.org/pdf/2211.09794.pdf) and the ad-hoc notebook implementation for DiffEdit latent inversion [here](https://github.com/Xiang-cd/DiffEdit-stable-diffusion/blob/main/diffedit.ipynb).
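A hedged sketch of how this inverse scheduler might be paired with [`StableDiffusionDiffEditPipeline`], assuming the pipeline exposes an `inverse_scheduler` component used during latent inversion:

```py
import torch
from diffusers import (
    StableDiffusionDiffEditPipeline,
    DPMSolverMultistepScheduler,
    DPMSolverMultistepInverseScheduler,
)

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
).to("cuda")
# forward sampling uses the regular multistep solver ...
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
# ... while latent inversion uses its inverse counterpart
pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
```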
|
||||
|
||||
## DPMSolverMultistepInverseScheduler
|
||||
[[autodoc]] DPMSolverMultistepInverseScheduler
|
||||
23
docs/source/en/api/utilities.mdx
Normal file
@@ -0,0 +1,23 @@
|
||||
# Utilities
|
||||
|
||||
Utility and helper functions for working with 🤗 Diffusers.
|
||||
|
||||
## randn_tensor
|
||||
|
||||
[[autodoc]] diffusers.utils.randn_tensor
|
||||
|
||||
## numpy_to_pil
|
||||
|
||||
[[autodoc]] utils.pil_utils.numpy_to_pil
|
||||
|
||||
## pt_to_pil
|
||||
|
||||
[[autodoc]] utils.pil_utils.pt_to_pil
|
||||
|
||||
## load_image
|
||||
|
||||
[[autodoc]] utils.testing_utils.load_image
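For example, `load_image` accepts a URL or a local path and returns a `PIL.Image` (a quick sketch, reusing an image URL from earlier in this document):

```py
from diffusers.utils import load_image

image = load_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
)
print(image.size)
```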
|
||||
|
||||
## export_to_video
|
||||
|
||||
[[autodoc]] utils.testing_utils.export_to_video
|
||||
@@ -14,7 +14,7 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
We ❤️ contributions from the open-source community! Everyone is welcome, and all types of participation –not just code– are valued and appreciated. Answering questions, helping others, reaching out, and improving the documentation are all immensely valuable to the community, so don't be afraid and get involved if you're up for it!
|
||||
|
||||
Everyone is encouraged to start by saying 👋 in our public Discord channel. We discuss the latest trends in diffusion models, ask questions, show off personal projects, help each other with contributions, or just hang out ☕. <a href="https://Discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/Discord/823813159592001537?color=5865F2&logo=Discord&logoColor=white"></a>
|
||||
Everyone is encouraged to start by saying 👋 in our public Discord channel. We discuss the latest trends in diffusion models, ask questions, show off personal projects, help each other with contributions, or just hang out ☕. <a href="https://Discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white"></a>
|
||||
|
||||
Whichever way you choose to contribute, we strive to be part of an open, welcoming, and kind community. Please, read our [code of conduct](https://github.com/huggingface/diffusers/blob/main/CODE_OF_CONDUCT.md) and be mindful to respect it during your interactions. We also recommend you become familiar with the [ethical guidelines](https://huggingface.co/docs/diffusers/conceptual/ethical_guidelines) that guide our project and ask you to adhere to the same principles of transparency and responsibility.
|
||||
|
||||
|
||||
@@ -37,7 +37,8 @@ We cover Diffusion models with the following pipelines:
|
||||
|
||||
## Qualitative Evaluation
|
||||
|
||||
Qualitative evaluation typically involves human assessment of generated images. Quality is measured across aspects such as compositionality, image-text alignment, and spatial relations. Common prompts provide a degree of uniformity for subjective metrics. DrawBench and PartiPrompts are prompt datasets used for qualitative benchmarking. DrawBench and PartiPrompts were introduced by [Imagen](https://imagen.research.google/) and [Parti](https://parti.research.google/) respectively.
|
||||
Qualitative evaluation typically involves human assessment of generated images. Quality is measured across aspects such as compositionality, image-text alignment, and spatial relations. Common prompts provide a degree of uniformity for subjective metrics.
|
||||
DrawBench and PartiPrompts are prompt datasets used for qualitative benchmarking. DrawBench and PartiPrompts were introduced by [Imagen](https://imagen.research.google/) and [Parti](https://parti.research.google/) respectively.
|
||||
|
||||
From the [official Parti website](https://parti.research.google/):
|
||||
|
||||
@@ -51,7 +52,13 @@ PartiPrompts has the following columns:
|
||||
- Category of the prompt (such as “Abstract”, “World Knowledge”, etc.)
|
||||
- Challenge reflecting the difficulty (such as “Basic”, “Complex”, “Writing & Symbols”, etc.)
|
||||
|
||||
These benchmarks allow for side-by-side human evaluation of different image generation models. Let’s see how we can use `diffusers` on a couple of PartiPrompts.
|
||||
These benchmarks allow for side-by-side human evaluation of different image generation models.
|
||||
|
||||
For this, the 🧨 Diffusers team has built **Open Parti Prompts**, which is a community-driven qualitative benchmark based on Parti Prompts to compare state-of-the-art open-source diffusion models:
|
||||
- [Open Parti Prompts Game](https://huggingface.co/spaces/OpenGenAI/open-parti-prompts): For 10 parti prompts, 4 generated images are shown and the user selects the image that suits the prompt best.
|
||||
- [Open Parti Prompts Leaderboard](https://huggingface.co/spaces/OpenGenAI/parti-prompts-leaderboard): The leaderboard comparing the currently best open-sourced diffusion models to each other.
|
||||
|
||||
To manually compare images, let’s see how we can use `diffusers` on a couple of PartiPrompts.
|
||||
|
||||
Below we show some prompts sampled across different challenges: Basic, Complex, Linguistic Structures, Imagination, and Writing & Symbols. Here we are using PartiPrompts as a [dataset](https://huggingface.co/datasets/nateraw/parti-prompts).
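For instance, a minimal sketch of sampling a few prompts from that dataset (the column names `Prompt` and `Challenge` are assumed from the dataset card):

```python
from datasets import load_dataset

parti_prompts = load_dataset("nateraw/parti-prompts", split="train")
# sample a handful of prompts across different challenges
sample = parti_prompts.shuffle(seed=0).select(range(5))
for row in sample:
    print(row["Challenge"], "->", row["Prompt"])
```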
|
||||
|
||||
|
||||
@@ -53,7 +53,7 @@ The library has three main components:
|
||||
|---|---|:---:|
|
||||
| [alt_diffusion](./api/pipelines/alt_diffusion) | [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) | Image-to-Image Text-Guided Generation |
|
||||
| [audio_diffusion](./api/pipelines/audio_diffusion) | [Audio Diffusion](https://github.com/teticio/audio-diffusion.git) | Unconditional Audio Generation |
|
||||
| [controlnet](./api/pipelines/stable_diffusion/controlnet) | [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation |
|
||||
| [controlnet](./api/pipelines/controlnet) | [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation |
|
||||
| [cycle_diffusion](./api/pipelines/cycle_diffusion) | [Unifying Diffusion Models' Latent Space, with Applications to CycleDiffusion and Guidance](https://arxiv.org/abs/2210.05559) | Image-to-Image Text-Guided Generation |
|
||||
| [dance_diffusion](./api/pipelines/dance_diffusion) | [Dance Diffusion](https://github.com/williamberman/diffusers.git) | Unconditional Audio Generation |
|
||||
| [ddpm](./api/pipelines/ddpm) | [Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239) | Unconditional Image Generation |
|
||||
|
||||
@@ -12,9 +12,9 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Installation
|
||||
|
||||
Install 🤗 Diffusers for whichever deep learning library you’re working with.
|
||||
Install 🤗 Diffusers for whichever deep learning library you're working with.
|
||||
|
||||
🤗 Diffusers is tested on Python 3.7+, PyTorch 1.7.0+ and flax. Follow the installation instructions below for the deep learning library you are using:
|
||||
🤗 Diffusers is tested on Python 3.7+, PyTorch 1.7.0+ and Flax. Follow the installation instructions below for the deep learning library you are using:
|
||||
|
||||
- [PyTorch](https://pytorch.org/get-started/locally/) installation instructions.
|
||||
- [Flax](https://flax.readthedocs.io/en/latest/) installation instructions.
|
||||
@@ -37,27 +37,28 @@ Activate the virtual environment:
|
||||
source .env/bin/activate
|
||||
```
|
||||
|
||||
Now you're ready to install 🤗 Diffusers with the following command:
|
||||
|
||||
**For PyTorch**
|
||||
🤗 Diffusers also relies on the 🤗 Transformers library, and you can install both with the following command:
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```bash
|
||||
pip install diffusers["torch"]
|
||||
pip install diffusers["torch"] transformers
|
||||
```
|
||||
|
||||
**For Flax**
|
||||
|
||||
</pt>
|
||||
<jax>
|
||||
```bash
|
||||
pip install diffusers["flax"]
|
||||
pip install diffusers["flax"] transformers
|
||||
```
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
|
||||
## Install from source
|
||||
|
||||
Before intsalling `diffusers` from source, make sure you have `torch` and `accelerate` installed.
|
||||
Before installing 🤗 Diffusers from source, make sure you have `torch` and 🤗 Accelerate installed.
|
||||
|
||||
For `torch` installation refer to the `torch` [docs](https://pytorch.org/get-started/locally/#start-locally).
|
||||
For `torch` installation, refer to the `torch` [installation](https://pytorch.org/get-started/locally/#start-locally) guide.
|
||||
|
||||
To install `accelerate`
|
||||
To install 🤗 Accelerate:
|
||||
|
||||
```bash
|
||||
pip install accelerate
|
||||
@@ -74,7 +75,7 @@ The `main` version is useful for staying up-to-date with the latest developments
|
||||
For instance, if a bug has been fixed since the last official release but a new release hasn't been rolled out yet.
|
||||
However, this means the `main` version may not always be stable.
|
||||
We strive to keep the `main` version operational, and most issues are usually resolved within a few hours or a day.
|
||||
If you run into a problem, please open an [Issue](https://github.com/huggingface/transformers/issues), so we can fix it even sooner!
|
||||
If you run into a problem, please open an [Issue](https://github.com/huggingface/diffusers/issues/new/choose), so we can fix it even sooner!
|
||||
|
||||
## Editable install
|
||||
|
||||
@@ -90,21 +91,22 @@ git clone https://github.com/huggingface/diffusers.git
|
||||
cd diffusers
|
||||
```
|
||||
|
||||
**For PyTorch**
|
||||
|
||||
```
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```bash
|
||||
pip install -e ".[torch]"
|
||||
```
|
||||
|
||||
**For Flax**
|
||||
|
||||
```
|
||||
</pt>
|
||||
<jax>
|
||||
```bash
|
||||
pip install -e ".[flax]"
|
||||
```
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
|
||||
These commands will link the folder you cloned the repository to and your Python library paths.
|
||||
Python will now look inside the folder you cloned to in addition to the normal library paths.
|
||||
For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.7/site-packages/`, Python will also search the folder you cloned to: `~/diffusers/`.
|
||||
For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.7/site-packages/`, Python will also search the `~/diffusers/` folder you cloned to.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
|
||||
@@ -50,7 +50,6 @@ from diffusers import DiffusionPipeline
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
pipe = pipe.to("cuda")
|
||||
@@ -60,8 +59,10 @@ image = pipe(prompt).images[0]
|
||||
```
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
It is strongly discouraged to make use of [`torch.autocast`](https://pytorch.org/docs/stable/amp.html#torch.autocast) in any of the pipelines as it can lead to black images and is always slower than using pure
|
||||
float16 precision.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Sliced attention for additional memory savings
|
||||
@@ -83,7 +84,6 @@ from diffusers import DiffusionPipeline
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
pipe = pipe.to("cuda")
|
||||
@@ -110,7 +110,6 @@ from diffusers import StableDiffusionPipeline
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
pipe = pipe.to("cuda")
|
||||
@@ -164,7 +163,6 @@ from diffusers import StableDiffusionPipeline
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
|
||||
@@ -189,7 +187,6 @@ from diffusers import StableDiffusionPipeline
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
|
||||
@@ -202,6 +199,8 @@ image = pipe(prompt).images[0]
|
||||
|
||||
**Note**: When using `enable_sequential_cpu_offload()`, it is important to **not** move the pipeline to CUDA beforehand or else the gain in memory consumption will only be minimal. See [this issue](https://github.com/huggingface/diffusers/issues/1934) for more information.
|
||||
|
||||
**Note**: `enable_sequential_cpu_offload()` is a stateful operation that installs hooks on the models.
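For reference, a minimal sketch that respects the notes above (no explicit `.to("cuda")` before enabling offloading):

```Python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
)
# do NOT move the pipeline to CUDA first; offloading handles device placement
pipe.enable_sequential_cpu_offload()

prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]
```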
|
||||
|
||||
|
||||
<a name="model_offloading"></a>
|
||||
## Model offloading for fast inference and memory savings
|
||||
@@ -251,6 +250,11 @@ image = pipe(prompt).images[0]
|
||||
This feature requires `accelerate` version 0.17.0 or larger.
|
||||
</Tip>
|
||||
|
||||
**Note**: `enable_model_cpu_offload()` is a stateful operation that installs hooks on the models and state on the pipeline. In order to properly offload
|
||||
models after they are called, it is required that the entire pipeline is run and models are called in the order the pipeline expects them to be. Exercise caution
|
||||
if models are re-used outside the context of the pipeline after hooks have been installed. See [accelerate](https://huggingface.co/docs/accelerate/v0.18.0/en/package_reference/big_modeling#accelerate.hooks.remove_hook_from_module)
|
||||
for further docs on removing hooks.
|
||||
|
||||
## Using Channels Last memory format
|
||||
|
||||
Channels last memory format is an alternative way of ordering NCHW tensors in memory preserving dimensions ordering. Channels last tensors ordered in such a way that channels become the densest dimension (aka storing images pixel-per-pixel). Since not all operators currently support channels last format it may result in a worst performance, so it's better to try it and see if it works for your model.
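A short sketch of switching the UNet to channels-last and verifying the change via the tensor strides (assuming a pipeline `pipe` loaded as in the examples above):

```Python
print(pipe.unet.conv_out.state_dict()["weight"].stride())  # e.g. (2880, 9, 3, 1)
pipe.unet.to(memory_format=torch.channels_last)  # in-place operation
print(pipe.unet.conv_out.state_dict()["weight"].stride())  # stride 1 in the 2nd dimension confirms channels-last
```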
|
||||
@@ -400,7 +404,14 @@ Here are the speedups we obtain on a few Nvidia GPUs when running the inference
|
||||
| A100-SXM4-40GB | 18.6it/s | 29.it/s |
|
||||
| A100-SXM-80GB | 18.7it/s | 29.5it/s |
|
||||
|
||||
To leverage it just make sure you have:
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
If you have PyTorch 2.0 installed, you shouldn't use xFormers!
|
||||
|
||||
</Tip>
|
||||
|
||||
- PyTorch > 1.12
- CUDA available
- The [xFormers library](xformers) installed.
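Enabling it is then a single call on the pipeline, for example:

```Python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
pipe.enable_xformers_memory_efficient_attention()
```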
|
||||
|
||||
@@ -12,19 +12,21 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Accelerated PyTorch 2.0 support in Diffusers
|
||||
|
||||
Starting from version `0.13.0`, Diffusers supports the latest optimization from the upcoming [PyTorch 2.0](https://pytorch.org/get-started/pytorch-2.0/) release. These include:
|
||||
1. Support for accelerated transformers implementation with memory-efficient attention – no extra dependencies required.
|
||||
Starting from version `0.13.0`, Diffusers supports the latest optimization from [PyTorch 2.0](https://pytorch.org/get-started/pytorch-2.0/). These include:
|
||||
1. Support for accelerated transformers implementation with memory-efficient attention – no extra dependencies (such as `xformers`) required.
|
||||
2. [torch.compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) support for extra performance boost when individual models are compiled.
|
||||
|
||||
|
||||
## Installation
|
||||
To benefit from the accelerated attention implementation and `torch.compile`, you just need to install the latest versions of PyTorch 2.0 from `pip`, and make sure you are on diffusers 0.13.0 or later. As explained below, `diffusers` automatically uses the attention optimizations (but not `torch.compile`) when available.
|
||||
|
||||
To benefit from the accelerated attention implementation and `torch.compile()`, you just need to install the latest versions of PyTorch 2.0 from pip, and make sure you are on diffusers 0.13.0 or later. As explained below, diffusers automatically uses the optimized attention processor ([`AttnProcessor2_0`](https://github.com/huggingface/diffusers/blob/1a5797c6d4491a879ea5285c4efc377664e0332d/src/diffusers/models/attention_processor.py#L798)) (but not `torch.compile()`)
|
||||
when PyTorch 2.0 is available.
|
||||
|
||||
```bash
|
||||
pip install --upgrade torch torchvision diffusers
|
||||
pip install --upgrade torch diffusers
|
||||
```
|
||||
|
||||
## Using accelerated transformers and torch.compile.
|
||||
## Using accelerated transformers and `torch.compile`.
|
||||
|
||||
|
||||
1. **Accelerated Transformers implementation**
|
||||
@@ -46,13 +48,13 @@ pip install --upgrade torch torchvision diffusers
|
||||
|
||||
If you want to enable it explicitly (which is not required), you can do so as shown below.
|
||||
|
||||
```Python
|
||||
```diff
|
||||
import torch
|
||||
from diffusers import DiffusionPipeline
|
||||
from diffusers.models.attention_processor import AttnProcessor2_0
|
||||
+ from diffusers.models.attention_processor import AttnProcessor2_0
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
|
||||
pipe.unet.set_attn_processor(AttnProcessor2_0())
|
||||
+ pipe.unet.set_attn_processor(AttnProcessor2_0())
|
||||
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
image = pipe(prompt).images[0]
|
||||
@@ -60,151 +62,383 @@ pip install --upgrade torch torchvision diffusers
|
||||
|
||||
This should be as fast and memory efficient as `xFormers`. More details [in our benchmark](#benchmark).
|
||||
|
||||
It is possible to revert to the vanilla attention processor ([`AttnProcessor`](https://github.com/huggingface/diffusers/blob/1a5797c6d4491a879ea5285c4efc377664e0332d/src/diffusers/models/attention_processor.py#L402)), which can be helpful to make the pipeline more deterministic, or if you need to convert a fine-tuned model to other formats such as [Core ML](https://huggingface.co/docs/diffusers/v0.16.0/en/optimization/coreml#how-to-run-stable-diffusion-with-core-ml). To use the normal attention processor you can use the [`~diffusers.UNet2DConditionModel.set_default_attn_processor`] function:
|
||||
|
||||
```Python
|
||||
import torch
|
||||
from diffusers import DiffusionPipeline
|
||||
from diffusers.models.attention_processor import AttnProcessor
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
|
||||
pipe.unet.set_default_attn_processor()
|
||||
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
image = pipe(prompt).images[0]
|
||||
```
|
||||
|
||||
2. **torch.compile**
|
||||
|
||||
To get an additional speedup, we can use the new `torch.compile` feature. To do so, we simply wrap our `unet` with `torch.compile`. For more information and different options, refer to the
|
||||
To get an additional speedup, we can use the new `torch.compile` feature. Since the UNet of the pipeline is usually the most computationally expensive, we wrap the `unet` with `torch.compile` leaving rest of the sub-models (text encoder and VAE) as is. For more information and different options, refer to the
|
||||
[torch compile docs](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html).
|
||||
|
||||
```python
|
||||
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

batch_size = 10
prompt = "A photo of an astronaut riding a horse on mars."
images = pipe(prompt, num_inference_steps=50, num_images_per_prompt=batch_size).images
|
||||
```
|
||||
|
||||
Depending on the type of GPU, `compile()` can yield between 2-9% of _additional speed-up_ over the accelerated transformer optimizations. Note, however, that compilation is able to squeeze more performance improvements in more recent GPU architectures such as Ampere (A100, 3090), Ada (4090) and Hopper (H100).
|
||||
Depending on the type of GPU, `compile()` can yield between **5% - 300%** of _additional speed-up_ over the accelerated transformer optimizations. Note, however, that compilation is able to squeeze more performance improvements in more recent GPU architectures such as Ampere (A100, 3090), Ada (4090) and Hopper (H100).
|
||||
|
||||
Compilation takes some time to complete, so it is best suited for situations where you need to prepare your pipeline once and then perform the same type of inference operations multiple times.
|
||||
Compilation takes some time to complete, so it is best suited for situations where you need to prepare your pipeline once and then perform the same type of inference operations multiple times. Calling the compiled pipeline on a different image size will re-trigger compilation which can be expensive.
|
||||
|
||||
|
||||
## Benchmark
|
||||
|
||||
We conducted a simple benchmark on different GPUs to compare vanilla attention, xFormers, `torch.nn.functional.scaled_dot_product_attention` and `torch.compile+torch.nn.functional.scaled_dot_product_attention`.
|
||||
For the benchmark we used the [stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) model with 50 steps. The `xFormers` benchmark is done using the `torch==1.13.1` version, while the accelerated transformers optimizations are tested using nightly versions of PyTorch 2.0. The tables below summarize the results we got.
|
||||
We conducted a comprehensive benchmark with PyTorch 2.0's efficient attention implementation and `torch.compile` across different GPUs and batch sizes for five of our most used pipelines. We used `diffusers 0.17.0.dev0`, which [makes sure `torch.compile()` is leveraged optimally](https://github.com/huggingface/diffusers/pull/3313).
|
||||
|
||||
Please refer to [our featured blog post in the PyTorch site](https://pytorch.org/blog/accelerated-diffusers-pt-20/) for more details.
|
||||
### Benchmarking code
|
||||
|
||||
### FP16 benchmark
|
||||
#### Stable Diffusion text-to-image
|
||||
|
||||
The table below shows the benchmark results for inference using `fp16`. As we can see, `torch.nn.functional.scaled_dot_product_attention` is as fast as `xFormers` (sometimes slightly faster/slower) on all the GPUs we tested.
|
||||
And using `torch.compile` gives a further speed-up of up to 10% over `xFormers`, but it's mostly noticeable on the A100 GPU.
|
||||
```python
|
||||
from diffusers import DiffusionPipeline
|
||||
import torch
|
||||
|
||||
___The time reported is in seconds.___
|
||||
path = "runwayml/stable-diffusion-v1-5"
|
||||
|
||||
| GPU | Batch Size | Vanilla Attention | xFormers | PyTorch2.0 SDPA | SDPA + torch.compile | Speed over xformers (%) |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| A100 | 1 | 2.69 | 2.7 | 1.98 | 2.47 | 8.52 |
|
||||
| A100 | 2 | 3.21 | 3.04 | 2.38 | 2.78 | 8.55 |
|
||||
| A100 | 4 | 5.27 | 3.91 | 3.89 | 3.53 | 9.72 |
|
||||
| A100 | 8 | 9.74 | 7.03 | 7.04 | 6.62 | 5.83 |
|
||||
| A100 | 10 | 12.02 | 8.7 | 8.67 | 8.45 | 2.87 |
|
||||
| A100 | 16 | 18.95 | 13.57 | 13.55 | 13.20 | 2.73 |
|
||||
| A100 | 32 (1) | OOM | 26.56 | 26.68 | 25.85 | 2.67 |
|
||||
| A100 | 64 | | 52.51 | 53.03 | 50.93 | 3.01 |
|
||||
| | | | | | | |
|
||||
| A10 | 4 | 13.94 | 9.81 | 10.01 | 9.35 | 4.69 |
|
||||
| A10 | 8 | 27.09 | 19 | 19.53 | 18.33 | 3.53 |
|
||||
| A10 | 10 | 33.69 | 23.53 | 24.19 | 22.52 | 4.29 |
|
||||
| A10 | 16 | OOM | 37.55 | 38.31 | 36.81 | 1.97 |
|
||||
| A10 | 32 (1) | | 77.19 | 78.43 | 76.64 | 0.71 |
|
||||
| A10 | 64 (1) | | 173.59 | 158.99 | 155.14 | 10.63 |
|
||||
| | | | | | | |
|
||||
| T4 | 4 | 38.81 | 30.09 | 29.74 | 27.55 | 8.44 |
|
||||
| T4 | 8 | OOM | 55.71 | 55.99 | 53.85 | 3.34 |
|
||||
| T4 | 10 | OOM | 68.96 | 69.86 | 65.35 | 5.23 |
|
||||
| T4 | 16 | OOM | 111.47 | 113.26 | 106.93 | 4.07 |
|
||||
| | | | | | | |
|
||||
| V100 | 4 | 9.84 | 8.16 | 8.09 | 7.65 | 6.25 |
|
||||
| V100 | 8 | OOM | 15.62 | 15.44 | 14.59 | 6.59 |
|
||||
| V100 | 10 | OOM | 19.52 | 19.28 | 18.18 | 6.86 |
|
||||
| V100 | 16 | OOM | 30.29 | 29.84 | 28.22 | 6.83 |
|
||||
| | | | | | | |
|
||||
| 3090 | 1 | 2.94 | 2.5 | 2.42 | 2.33 | 6.80 |
|
||||
| 3090 | 4 | 10.04 | 7.82 | 7.72 | 7.38 | 5.63 |
|
||||
| 3090 | 8 | 19.27 | 14.97 | 14.88 | 14.15 | 5.48 |
|
||||
| 3090 | 10| 24.08 | 18.7 | 18.62 | 18.12 | 3.10 |
|
||||
| 3090 | 16 | OOM | 29.06 | 28.88 | 28.2 | 2.96 |
|
||||
| 3090 | 32 (1) | | 58.05 | 57.42 | 56.28 | 3.05 |
|
||||
| 3090 | 64 (1) | | 126.54 | 114.27 | 112.21 | 11.32 |
|
||||
| | | | | | | |
|
||||
| 3090 Ti | 1 | 2.7 | 2.26 | 2.19 | 2.12 | 6.19 |
|
||||
| 3090 Ti | 4 | 9.07 | 7.14 | 7.00 | 6.71 | 6.02 |
|
||||
| 3090 Ti | 8 | 17.51 | 13.65 | 13.53 | 12.94 | 5.20 |
|
||||
| 3090 Ti | 10 (2) | 21.79 | 16.85 | 16.77 | 16.44 | 2.43 |
|
||||
| 3090 Ti | 16 | OOM | 26.1 | 26.04 | 25.53 | 2.18 |
|
||||
| 3090 Ti | 32 (1) | | 51.78 | 51.71 | 50.91 | 1.68 |
|
||||
| 3090 Ti | 64 (1) | | 112.02 | 102.78 | 100.89 | 9.94 |
|
||||
| | | | | | | |
|
||||
| 4090 | 1 | 4.47 | 3.98 | 1.28 | 1.21 | 69.60 |
|
||||
| 4090 | 4 | 10.48 | 8.37 | 3.76 | 3.56 | 57.47 |
|
||||
| 4090 | 8 | 14.33 | 10.22 | 7.43 | 6.99 | 31.60 |
|
||||
| 4090 | 16 | | 17.07 | 14.98 | 14.58 | 14.59 |
|
||||
| 4090 | 32 (1) | | 39.03 | 30.18 | 29.49 | 24.44 |
|
||||
| 4090 | 64 (1) | | 77.29 | 61.34 | 59.96 | 22.42 |
|
||||
run_compile = True # Set True / False
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
|
||||
pipe = pipe.to("cuda")
|
||||
pipe.unet.to(memory_format=torch.channels_last)
|
||||
|
||||
if run_compile:
|
||||
print("Run torch compile")
|
||||
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
||||
|
||||
prompt = "ghibli style, a fantasy landscape with castles"
|
||||
|
||||
for _ in range(3):
|
||||
images = pipe(prompt=prompt).images
|
||||
```
|
||||
|
||||
#### Stable Diffusion image-to-image
|
||||
|
||||
```python
from diffusers import StableDiffusionImg2ImgPipeline
import requests
import torch
from PIL import Image
from io import BytesIO

url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"

response = requests.get(url)
init_image = Image.open(BytesIO(response.content)).convert("RGB")
init_image = init_image.resize((512, 512))

path = "runwayml/stable-diffusion-v1-5"

run_compile = True  # Set True / False

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(path, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.unet.to(memory_format=torch.channels_last)

if run_compile:
    print("Run torch compile")
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

prompt = "ghibli style, a fantasy landscape with castles"

for _ in range(3):
    image = pipe(prompt=prompt, image=init_image).images[0]
```
|
||||
|
||||
#### Stable Diffusion - inpainting

```python
from diffusers import StableDiffusionInpaintPipeline
import requests
import torch
from PIL import Image
from io import BytesIO

url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"

def download_image(url):
    response = requests.get(url)
    return Image.open(BytesIO(response.content)).convert("RGB")

img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

init_image = download_image(img_url).resize((512, 512))
mask_image = download_image(mask_url).resize((512, 512))

path = "runwayml/stable-diffusion-inpainting"

run_compile = True  # Set True / False

pipe = StableDiffusionInpaintPipeline.from_pretrained(path, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.unet.to(memory_format=torch.channels_last)

if run_compile:
    print("Run torch compile")
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

prompt = "ghibli style, a fantasy landscape with castles"

for _ in range(3):
    image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
```

### FP32 benchmark

The table below shows the benchmark results for inference using `fp32`. In this case, `torch.nn.functional.scaled_dot_product_attention` is faster than `xFormers` on all the GPUs we tested.

Using `torch.compile` in addition to the accelerated transformers implementation can yield up to 19% performance improvement over `xFormers` in Ampere and Ada cards, and up to 20% (Ampere) or 28% (Ada) over vanilla attention.

| GPU | Batch Size | Vanilla Attention | xFormers | PyTorch2.0 SDPA | SDPA + torch.compile | Speed over xformers (%) | Speed over vanilla (%) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| A100 | 1 | 4.97 | 3.86 | 2.6 | 2.86 | 25.91 | 42.45 |
| A100 | 2 | 9.03 | 6.76 | 4.41 | 4.21 | 37.72 | 53.38 |
| A100 | 4 | 16.70 | 12.42 | 7.94 | 7.54 | 39.29 | 54.85 |
| A100 | 10 | OOM | 29.93 | 18.70 | 18.46 | 38.32 | |
| A100 | 16 | | 47.08 | 29.41 | 29.04 | 38.32 | |
| A100 | 32 | | 92.89 | 57.55 | 56.67 | 38.99 | |
| A100 | 64 | | 185.3 | 114.8 | 112.98 | 39.03 | |
| | | | | | | | |
| A10 | 1 | 10.59 | 8.81 | 7.51 | 7.35 | 16.57 | 30.59 |
| A10 | 4 | 34.77 | 27.63 | 22.77 | 22.07 | 20.12 | 36.53 |
| A10 | 8 | | 56.19 | 43.53 | 43.86 | 21.94 | |
| A10 | 16 | | 116.49 | 88.56 | 86.64 | 25.62 | |
| A10 | 32 | | 221.95 | 175.74 | 168.18 | 24.23 | |
| A10 | 48 | | 333.23 | 264.84 | | 20.52 | |
| | | | | | | | |
| T4 | 1 | 28.2 | 24.49 | 23.93 | 23.56 | 3.80 | 16.45 |
| T4 | 2 | 52.77 | 45.7 | 45.88 | 45.06 | 1.40 | 14.61 |
| T4 | 4 | OOM | 85.72 | 85.78 | 84.48 | 1.45 | |
| T4 | 8 | | 149.64 | 150.75 | 148.4 | 0.83 | |
| | | | | | | | |
| V100 | 1 | 7.4 | 6.84 | 6.8 | 6.66 | 2.63 | 10.00 |
| V100 | 2 | 13.85 | 12.81 | 12.66 | 12.35 | 3.59 | 10.83 |
| V100 | 4 | OOM | 25.73 | 25.31 | 24.78 | 3.69 | |
| V100 | 8 | | 43.95 | 43.37 | 42.25 | 3.87 | |
| V100 | 16 | | 84.99 | 84.73 | 82.55 | 2.87 | |
| | | | | | | | |
| 3090 | 1 | 7.09 | 6.78 | 5.34 | 5.35 | 21.09 | 24.54 |
| 3090 | 4 | 22.69 | 21.45 | 18.56 | 18.18 | 15.24 | 19.88 |
| 3090 | 8 | | 42.59 | 36.68 | 35.61 | 16.39 | |
| 3090 | 16 | | 85.35 | 72.93 | 70.18 | 17.77 | |
| 3090 | 32 (1) | | 162.05 | 143.46 | 138.67 | 14.43 | |
| | | | | | | | |
| 3090 Ti | 1 | 6.45 | 6.19 | 4.99 | 4.89 | 21.00 | 24.19 |
| 3090 Ti | 4 | 20.32 | 19.31 | 17.02 | 16.48 | 14.66 | 18.90 |
| 3090 Ti | 8 | | 37.93 | 33.21 | 32.24 | 15.00 | |
| 3090 Ti | 16 | | 75.37 | 66.63 | 64.5 | 14.42 | |
| 3090 Ti | 32 (1) | | 142.55 | 128.89 | 124.92 | 12.37 | |
| | | | | | | | |
| 4090 | 1 | 5.54 | 4.99 | 2.66 | 2.58 | 48.30 | 53.43 |
| 4090 | 4 | 13.67 | 11.4 | 8.81 | 8.46 | 25.79 | 38.11 |
| 4090 | 8 | | 19.79 | 17.55 | 16.62 | 16.02 | |
| 4090 | 16 | | 38.62 | 35.65 | 34.07 | 11.78 | |
| 4090 | 32 (1) | | 76.57 | 69.48 | 65.35 | 14.65 | |
| 4090 | 48 | | 114.44 | 106.3 | | 7.11 | |

(1) Batch Size >= 32 requires enable_vae_slicing() because of https://github.com/pytorch/pytorch/issues/81665. This is required for PyTorch 1.13.1, and also for PyTorch 2.0 and large batch sizes.

For more details about how this benchmark was run, please refer to [this PR](https://github.com/huggingface/diffusers/pull/2303) and to [the blog post](https://pytorch.org/blog/accelerated-diffusers-pt-20/).
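For the batch sizes flagged with (1) above, here is a minimal sketch of the workaround, assuming a Stable Diffusion pipeline named `pipe` as in the snippets above (the batch size is illustrative):

```python
# Decode the latents in slices so very large batches don't run out of memory in the VAE.
pipe.enable_vae_slicing()
images = pipe(prompt=prompt, num_images_per_prompt=32).images
```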
|
||||
|
||||
#### ControlNet
|
||||
|
||||
```python
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
import requests
import torch
from PIL import Image
from io import BytesIO

url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"

response = requests.get(url)
init_image = Image.open(BytesIO(response.content)).convert("RGB")
init_image = init_image.resize((512, 512))

path = "runwayml/stable-diffusion-v1-5"

run_compile = True  # Set True / False

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    path, controlnet=controlnet, torch_dtype=torch.float16
)

pipe = pipe.to("cuda")
pipe.unet.to(memory_format=torch.channels_last)
pipe.controlnet.to(memory_format=torch.channels_last)

if run_compile:
    print("Run torch compile")
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
    pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True)

prompt = "ghibli style, a fantasy landscape with castles"

for _ in range(3):
    image = pipe(prompt=prompt, image=init_image).images[0]
```
|
||||
|
||||
#### IF text-to-image + upscaling

```python
from diffusers import DiffusionPipeline
import torch

run_compile = True  # Set True / False

pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-M-v1.0", variant="fp16", text_encoder=None, torch_dtype=torch.float16)
pipe.to("cuda")
pipe_2 = DiffusionPipeline.from_pretrained("DeepFloyd/IF-II-M-v1.0", variant="fp16", text_encoder=None, torch_dtype=torch.float16)
pipe_2.to("cuda")
pipe_3 = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16)
pipe_3.to("cuda")

pipe.unet.to(memory_format=torch.channels_last)
pipe_2.unet.to(memory_format=torch.channels_last)
pipe_3.unet.to(memory_format=torch.channels_last)

if run_compile:
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
    pipe_2.unet = torch.compile(pipe_2.unet, mode="reduce-overhead", fullgraph=True)
    pipe_3.unet = torch.compile(pipe_3.unet, mode="reduce-overhead", fullgraph=True)

prompt = "the blue hulk"

prompt_embeds = torch.randn((1, 2, 4096), dtype=torch.float16)
neg_prompt_embeds = torch.randn((1, 2, 4096), dtype=torch.float16)

for _ in range(3):
    image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=neg_prompt_embeds, output_type="pt").images
    image_2 = pipe_2(image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=neg_prompt_embeds, output_type="pt").images
    image_3 = pipe_3(prompt=prompt, image=image, noise_level=100).images
```
|
||||
|
||||
To give you a pictorial overview of the possible speed-ups that can be obtained with PyTorch 2.0 and `torch.compile()`, here is a plot that shows relative speed-ups for the [Stable Diffusion text-to-image pipeline](StableDiffusionPipeline) across five different GPU families (with a batch size of 4):
|
||||
|
||||

|
||||
|
||||
To give you an even better idea of how this speed-up holds for the other pipelines presented above, consider the following plot that shows the benchmarking numbers from an A100 across three different batch sizes (with PyTorch 2.0 nightly and `torch.compile()`):
|
||||
|
||||

|
||||
|
||||
_(Our benchmarking metric for the plots above is **number of iterations/second**)_
|
||||
|
||||
But we reveal all the benchmarking numbers in the interest of transparency!
|
||||
|
||||
In the following tables, we report our findings in terms of the number of **_iterations processed per second_**.
|
||||
|
||||
### A100 (batch size: 1)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 21.66 | 23.13 | 44.03 | 49.74 |
|
||||
| SD - img2img | 21.81 | 22.40 | 43.92 | 46.32 |
|
||||
| SD - inpaint | 22.24 | 23.23 | 43.76 | 49.25 |
|
||||
| SD - controlnet | 15.02 | 15.82 | 32.13 | 36.08 |
|
||||
| IF | 20.21 / <br>13.84 / <br>24.00 | 20.12 / <br>13.70 / <br>24.03 | ❌ | 97.34 / <br>27.23 / <br>111.66 |
|
||||
|
||||
### A100 (batch size: 4)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 11.6 | 13.12 | 14.62 | 17.27 |
|
||||
| SD - img2img | 11.47 | 13.06 | 14.66 | 17.25 |
|
||||
| SD - inpaint | 11.67 | 13.31 | 14.88 | 17.48 |
|
||||
| SD - controlnet | 8.28 | 9.38 | 10.51 | 12.41 |
|
||||
| IF | 25.02 | 18.04 | ❌ | 48.47 |
|
||||
|
||||
### A100 (batch size: 16)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 3.04 | 3.6 | 3.83 | 4.68 |
|
||||
| SD - img2img | 2.98 | 3.58 | 3.83 | 4.67 |
|
||||
| SD - inpaint | 3.04 | 3.66 | 3.9 | 4.76 |
|
||||
| SD - controlnet | 2.15 | 2.58 | 2.74 | 3.35 |
|
||||
| IF | 8.78 | 9.82 | ❌ | 16.77 |
|
||||
|
||||
### V100 (batch size: 1)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 18.99 | 19.14 | 20.95 | 22.17 |
|
||||
| SD - img2img | 18.56 | 19.18 | 20.95 | 22.11 |
|
||||
| SD - inpaint | 19.14 | 19.06 | 21.08 | 22.20 |
|
||||
| SD - controlnet | 13.48 | 13.93 | 15.18 | 15.88 |
|
||||
| IF | 20.01 / <br>9.08 / <br>23.34 | 19.79 / <br>8.98 / <br>24.10 | ❌ | 55.75 / <br>11.57 / <br>57.67 |
|
||||
|
||||
### V100 (batch size: 4)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 5.96 | 5.89 | 6.83 | 6.86 |
|
||||
| SD - img2img | 5.90 | 5.91 | 6.81 | 6.82 |
|
||||
| SD - inpaint | 5.99 | 6.03 | 6.93 | 6.95 |
|
||||
| SD - controlnet | 4.26 | 4.29 | 4.92 | 4.93 |
|
||||
| IF | 15.41 | 14.76 | ❌ | 22.95 |
|
||||
|
||||
### V100 (batch size: 16)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 1.66 | 1.66 | 1.92 | 1.90 |
|
||||
| SD - img2img | 1.65 | 1.65 | 1.91 | 1.89 |
|
||||
| SD - inpaint | 1.69 | 1.69 | 1.95 | 1.93 |
|
||||
| SD - controlnet | 1.19 | 1.19 | OOM after warmup | 1.36 |
|
||||
| IF | 5.43 | 5.29 | ❌ | 7.06 |
|
||||
|
||||
### T4 (batch size: 1)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 6.9 | 6.95 | 7.3 | 7.56 |
|
||||
| SD - img2img | 6.84 | 6.99 | 7.04 | 7.55 |
|
||||
| SD - inpaint | 6.91 | 6.7 | 7.01 | 7.37 |
|
||||
| SD - controlnet | 4.89 | 4.86 | 5.35 | 5.48 |
|
||||
| IF | 17.42 / <br>2.47 / <br>18.52 | 16.96 / <br>2.45 / <br>18.69 | ❌ | 24.63 / <br>2.47 / <br>23.39 |
|
||||
|
||||
### T4 (batch size: 4)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 1.79 | 1.79 | 2.03 | 1.99 |
|
||||
| SD - img2img | 1.77 | 1.77 | 2.05 | 2.04 |
|
||||
| SD - inpaint | 1.81 | 1.82 | 2.09 | 2.09 |
|
||||
| SD - controlnet | 1.34 | 1.27 | 1.47 | 1.46 |
|
||||
| IF | 5.79 | 5.61 | ❌ | 7.39 |
|
||||
|
||||
### T4 (batch size: 16)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 2.34s | 2.30s | OOM after 2nd iteration | 1.99s |
|
||||
| SD - img2img | 2.35s | 2.31s | OOM after warmup | 2.00s |
|
||||
| SD - inpaint | 2.30s | 2.26s | OOM after 2nd iteration | 1.95s |
|
||||
| SD - controlnet | OOM after 2nd iteration | OOM after 2nd iteration | OOM after warmup | OOM after warmup |
|
||||
| IF * | 1.44 | 1.44 | ❌ | 1.94 |
|
||||
|
||||
### RTX 3090 (batch size: 1)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 22.56 | 22.84 | 23.84 | 25.69 |
|
||||
| SD - img2img | 22.25 | 22.61 | 24.1 | 25.83 |
|
||||
| SD - inpaint | 22.22 | 22.54 | 24.26 | 26.02 |
|
||||
| SD - controlnet | 16.03 | 16.33 | 17.38 | 18.56 |
|
||||
| IF | 27.08 / <br>9.07 / <br>31.23 | 26.75 / <br>8.92 / <br>31.47 | ❌ | 68.08 / <br>11.16 / <br>65.29 |
|
||||
|
||||
### RTX 3090 (batch size: 4)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 6.46 | 6.35 | 7.29 | 7.3 |
|
||||
| SD - img2img | 6.33 | 6.27 | 7.31 | 7.26 |
|
||||
| SD - inpaint | 6.47 | 6.4 | 7.44 | 7.39 |
|
||||
| SD - controlnet | 4.59 | 4.54 | 5.27 | 5.26 |
|
||||
| IF | 16.81 | 16.62 | ❌ | 21.57 |
|
||||
|
||||
### RTX 3090 (batch size: 16)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 1.7 | 1.69 | 1.93 | 1.91 |
|
||||
| SD - img2img | 1.68 | 1.67 | 1.93 | 1.9 |
|
||||
| SD - inpaint | 1.72 | 1.71 | 1.97 | 1.94 |
|
||||
| SD - controlnet | 1.23 | 1.22 | 1.4 | 1.38 |
|
||||
| IF | 5.01 | 5.00 | ❌ | 6.33 |
|
||||
|
||||
### RTX 4090 (batch size: 1)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 40.5 | 41.89 | 44.65 | 49.81 |
|
||||
| SD - img2img | 40.39 | 41.95 | 44.46 | 49.8 |
|
||||
| SD - inpaint | 40.51 | 41.88 | 44.58 | 49.72 |
|
||||
| SD - controlnet | 29.27 | 30.29 | 32.26 | 36.03 |
|
||||
| IF | 69.71 / <br>18.78 / <br>85.49 | 69.13 / <br>18.80 / <br>85.56 | ❌ | 124.60 / <br>26.37 / <br>138.79 |
|
||||
|
||||
### RTX 4090 (batch size: 4)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 12.62 | 12.84 | 15.32 | 15.59 |
|
||||
| SD - img2img | 12.61 | 12.79 | 15.35 | 15.66 |
|
||||
| SD - inpaint | 12.65 | 12.81 | 15.3 | 15.58 |
|
||||
| SD - controlnet | 9.1 | 9.25 | 11.03 | 11.22 |
|
||||
| IF | 31.88 | 31.14 | ❌ | 43.92 |
|
||||
|
||||
### RTX 4090 (batch size: 16)
|
||||
|
||||
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| SD - txt2img | 3.17 | 3.2 | 3.84 | 3.85 |
|
||||
| SD - img2img | 3.16 | 3.2 | 3.84 | 3.85 |
|
||||
| SD - inpaint | 3.17 | 3.2 | 3.85 | 3.85 |
|
||||
| SD - controlnet | 2.23 | 2.3 | 2.7 | 2.75 |
|
||||
| IF | 9.26 | 9.2 | ❌ | 13.31 |
|
||||
|
||||
## Notes
|
||||
|
||||
* Follow [this PR](https://github.com/huggingface/diffusers/pull/3313) for more details on the environment used for conducting the benchmarks.
|
||||
* For the IF pipeline and batch sizes > 1, we only used a batch size > 1 in the first IF pipeline (text-to-image generation) and NOT for upscaling, which means the two upscaling pipelines received a batch size of 1.
|
||||
|
||||
*Thanks to [Horace He](https://github.com/Chillee) from the PyTorch team for their support in improving the `torch.compile()` integration in Diffusers.*
|
||||
@@ -33,7 +33,7 @@ The quicktour is a simplified version of the introductory 🧨 Diffusers [notebo
|
||||
Before you begin, make sure you have all the necessary libraries installed:
|
||||
|
||||
```bash
|
||||
!pip install --upgrade diffusers accelerate transformers
|
||||
```
|
||||
|
||||
- [🤗 Accelerate](https://huggingface.co/docs/accelerate/index) speeds up model loading for inference and training.
|
||||
@@ -121,9 +121,9 @@ Save the image by calling `save`:
|
||||
|
||||
You can also use the pipeline locally. The only difference is you need to download the weights first:
|
||||
|
||||
```bash
!git lfs install
!git clone https://huggingface.co/runwayml/stable-diffusion-v1-5
|
||||
```
|
||||
|
||||
Then load the saved weights into the pipeline:
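A minimal sketch of what that looks like (the local folder name matches the clone above):

```python
from diffusers import DiffusionPipeline

# Load from the local clone instead of downloading from the Hub.
pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5")
```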
|
||||
|
||||
@@ -153,7 +153,7 @@ def get_inputs(batch_size=1):
|
||||
You'll also need a function that'll display each batch of images:
|
||||
|
||||
```python
|
||||
from PIL import Image
|
||||
|
||||
|
||||
def image_grid(imgs, rows=2, cols=2):
|
||||
@@ -246,7 +246,7 @@ image_grid(images, rows=2, cols=4)
|
||||
Pretty impressive! Let's tweak the second image - corresponding to the `Generator` with a seed of `1` - a bit more by adding some text about the age of the subject:
|
||||
|
||||
```python
|
||||
prompts = [
|
||||
"portrait photo of the oldest warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
|
||||
"portrait photo of a old warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
|
||||
"portrait photo of a warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
|
||||
@@ -266,6 +266,6 @@ image_grid(images)
|
||||
|
||||
In this tutorial, you learned how to optimize a [`DiffusionPipeline`] for computational and memory efficiency as well as improving the quality of generated outputs. If you're interested in making your pipeline even faster, take a look at the following resources:
|
||||
|
||||
- Learn how [PyTorch 2.0](./optimization/torch2.0) and [`torch.compile`](https://pytorch.org/docs/stable/generated/torch.compile.html) can yield 5 - 300% faster inference speed. On an A100 GPU, inference can be up to 50% faster!
|
||||
- If you can't use PyTorch 2, we recommend you install [xFormers](./optimization/xformers). Its memory-efficient attention mechanism works great with PyTorch 1.13.1 for faster speed and reduced memory consumption.
|
||||
- Other optimization techniques, such as model offloading, are covered in [this guide](./optimization/fp16).
|
||||
|
||||
docs/source/en/training/adapt_a_model.mdx (new file)
@@ -0,0 +1,42 @@
|
||||
# Adapt a model to a new task
|
||||
|
||||
Many diffusion systems share the same components, allowing you to adapt a pretrained model for one task to an entirely different task.
|
||||
|
||||
This guide will show you how to adapt a pretrained text-to-image model for inpainting by initializing and modifying the architecture of a pretrained [`UNet2DConditionModel`].
|
||||
|
||||
## Configure UNet2DConditionModel parameters
|
||||
|
||||
A [`UNet2DConditionModel`] by default accepts 4 channels in the [input sample](https://huggingface.co/docs/diffusers/v0.16.0/en/api/models#diffusers.UNet2DConditionModel.in_channels). For example, load a pretrained text-to-image model like [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) and take a look at the number of `in_channels`:
|
||||
|
||||
```py
|
||||
from diffusers import StableDiffusionPipeline
|
||||
|
||||
pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
|
||||
pipeline.unet.config["in_channels"]
|
||||
4
|
||||
```
|
||||
|
||||
Inpainting requires 9 channels in the input sample. You can check this value in a pretrained inpainting model like [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting):
|
||||
|
||||
```py
|
||||
from diffusers import StableDiffusionPipeline
|
||||
|
||||
pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
|
||||
pipeline.unet.config["in_channels"]
|
||||
9
|
||||
```
|
||||
|
||||
To adapt your text-to-image model for inpainting, you'll need to change the number of `in_channels` from 4 to 9.
|
||||
|
||||
Initialize a [`UNet2DConditionModel`] with the pretrained text-to-image model weights, and change `in_channels` to 9. Changing the number of `in_channels` means you need to set `ignore_mismatched_sizes=True` and `low_cpu_mem_usage=False` to avoid a size mismatch error because the shape is different now.
|
||||
|
||||
```py
|
||||
from diffusers import UNet2DConditionModel
|
||||
|
||||
model_id = "runwayml/stable-diffusion-v1-5"
|
||||
unet = UNet2DConditionModel.from_pretrained(
|
||||
model_id, subfolder="unet", in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True
|
||||
)
|
||||
```
|
||||
|
||||
The pretrained weights of the other components from the text-to-image model are initialized from their checkpoints, but the input channel weights (`conv_in.weight`) of the `unet` are randomly initialized. It is important to finetune the model for inpainting because otherwise the model returns noise.
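As a quick sanity check, here is a sketch (assuming the `unet` initialized above) that confirms the adapted input layer:

```py
print(unet.config.in_channels)
# 9
print(unet.conv_in.weight.shape)
# torch.Size([320, 9, 3, 3]) -- the 9-channel input convolution is newly (randomly) initialized
```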
|
||||
@@ -33,7 +33,12 @@ cd diffusers
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
Then navigate into the [example folder](https://github.com/huggingface/diffusers/tree/main/examples/controlnet)
|
||||
```bash
|
||||
cd examples/controlnet
|
||||
```
|
||||
|
||||
Now run:
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
@@ -64,6 +69,8 @@ The original dataset is hosted in the ControlNet [repo](https://huggingface.co/l
|
||||
|
||||
Our training examples use [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) because that is what the original set of ControlNet models was trained on. However, ControlNet can be trained to augment any compatible Stable Diffusion model (such as [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4)) or [`stabilityai/stable-diffusion-2-1`](https://huggingface.co/stabilityai/stable-diffusion-2-1).
|
||||
|
||||
To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
|
||||
|
||||
## Training
|
||||
|
||||
Download the following images to condition our training with:
|
||||
@@ -74,7 +81,9 @@ wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/ma
|
||||
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
|
||||
```
|
||||
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument.
|
||||
|
||||
The training script creates and saves a `diffusion_pytorch_model.bin` file in your repository.
|
||||
|
||||
```bash
|
||||
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
|
||||
@@ -88,7 +97,8 @@ accelerate launch train_controlnet.py \
|
||||
--learning_rate=1e-5 \
|
||||
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
|
||||
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
|
||||
--train_batch_size=4
|
||||
--train_batch_size=4 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
This default configuration requires ~38GB VRAM.
|
||||
@@ -111,7 +121,8 @@ accelerate launch train_controlnet.py \
|
||||
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
|
||||
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
|
||||
--train_batch_size=1 \
|
||||
--gradient_accumulation_steps=4
|
||||
--gradient_accumulation_steps=4 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
## Training with multiple GPUs
|
||||
@@ -134,7 +145,8 @@ accelerate launch --mixed_precision="fp16" --multi_gpu train_controlnet.py \
|
||||
--train_batch_size=4 \
|
||||
--mixed_precision="fp16" \
|
||||
--tracker_project_name="controlnet-demo" \
|
||||
--report_to=wandb
|
||||
--report_to=wandb \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
## Example results
|
||||
@@ -182,7 +194,8 @@ accelerate launch train_controlnet.py \
|
||||
--train_batch_size=1 \
|
||||
--gradient_accumulation_steps=4 \
|
||||
--gradient_checkpointing \
|
||||
--use_8bit_adam
|
||||
--use_8bit_adam \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
## Training on a 12 GB GPU
|
||||
@@ -210,7 +223,8 @@ accelerate launch train_controlnet.py \
|
||||
--gradient_checkpointing \
|
||||
--use_8bit_adam \
|
||||
--enable_xformers_memory_efficient_attention \
|
||||
--set_grads_to_none
|
||||
--set_grads_to_none \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
When using `enable_xformers_memory_efficient_attention`, please make sure to install `xformers` by `pip install xformers`.
|
||||
@@ -274,7 +288,8 @@ accelerate launch train_controlnet.py \
|
||||
--gradient_checkpointing \
|
||||
--enable_xformers_memory_efficient_attention \
|
||||
--set_grads_to_none \
|
||||
--mixed_precision fp16
|
||||
--mixed_precision fp16 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
## Inference
|
||||
|
||||
docs/source/en/training/create_dataset.mdx (new file)
@@ -0,0 +1,90 @@
|
||||
# Create a dataset for training
|
||||
|
||||
There are many datasets on the [Hub](https://huggingface.co/datasets?task_categories=task_categories:text-to-image&sort=downloads) to train a model on, but if you can't find one you're interested in or want to use your own, you can create a dataset with the 🤗 [Datasets](hf.co/docs/datasets) library. The dataset structure depends on the task you want to train your model on. The most basic dataset structure is a directory of images for tasks like unconditional image generation. Another dataset structure may be a directory of images and a text file containing their corresponding text captions for tasks like text-to-image generation.
|
||||
|
||||
This guide will show you two ways to create a dataset to finetune on:
|
||||
|
||||
- provide a folder of images to the `--train_data_dir` argument
|
||||
- upload a dataset to the Hub and pass the dataset repository id to the `--dataset_name` argument
|
||||
|
||||
<Tip>
|
||||
|
||||
💡 Learn more about how to create an image dataset for training in the [Create an image dataset](https://huggingface.co/docs/datasets/image_dataset) guide.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Provide a dataset as a folder
|
||||
|
||||
For unconditional generation, you can provide your own dataset as a folder of images. The training script uses the [`ImageFolder`](https://huggingface.co/docs/datasets/en/image_dataset#imagefolder) builder from 🤗 Datasets to automatically build a dataset from the folder. Your directory structure should look like:
|
||||
|
||||
```bash
|
||||
data_dir/xxx.png
|
||||
data_dir/xxy.png
|
||||
data_dir/[...]/xxz.png
|
||||
```
|
||||
|
||||
Pass the path to the dataset directory to the `--train_data_dir` argument, and then you can start training:
|
||||
|
||||
```bash
|
||||
accelerate launch train_unconditional.py \
|
||||
--train_data_dir <path-to-train-directory> \
|
||||
<other-arguments>
|
||||
```
|
||||
|
||||
## Upload your data to the Hub
|
||||
|
||||
<Tip>
|
||||
|
||||
💡 For more details and context about creating and uploading a dataset to the Hub, take a look at the [Image search with 🤗 Datasets](https://huggingface.co/blog/image-search-datasets) post.
|
||||
|
||||
</Tip>
|
||||
|
||||
Start by creating a dataset with the [`ImageFolder`](https://huggingface.co/docs/datasets/image_load#imagefolder) feature, which creates an `image` column containing the PIL-encoded images.
|
||||
|
||||
You can use the `data_dir` or `data_files` parameters to specify the location of the dataset. The `data_files` parameter supports mapping specific files to dataset splits like `train` or `test`:
|
||||
|
||||
```python
|
||||
from datasets import load_dataset
|
||||
|
||||
# example 1: local folder
|
||||
dataset = load_dataset("imagefolder", data_dir="path_to_your_folder")
|
||||
|
||||
# example 2: local files (supported formats are tar, gzip, zip, xz, rar, zstd)
|
||||
dataset = load_dataset("imagefolder", data_files="path_to_zip_file")
|
||||
|
||||
# example 3: remote files (supported formats are tar, gzip, zip, xz, rar, zstd)
|
||||
dataset = load_dataset(
|
||||
"imagefolder",
|
||||
data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip",
|
||||
)
|
||||
|
||||
# example 4: providing several splits
|
||||
dataset = load_dataset(
|
||||
"imagefolder", data_files={"train": ["path/to/file1", "path/to/file2"], "test": ["path/to/file3", "path/to/file4"]}
|
||||
)
|
||||
```
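If your images have captions (for example for text-to-image finetuning), `ImageFolder` can also read them from a `metadata.jsonl` file stored alongside the images. A small sketch of writing such a file (the file names and captions are hypothetical):

```python
import json

records = [
    {"file_name": "xxx.png", "text": "a photo of a red circle"},
    {"file_name": "xxy.png", "text": "a photo of a blue square"},
]
# metadata.jsonl lives in the same folder as the images it describes
with open("path_to_your_folder/metadata.jsonl", "w") as f:
    for record in records:
        f.write(json.dumps(record) + "\n")
```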
|
||||
|
||||
Then use the [`~datasets.Dataset.push_to_hub`] method to upload the dataset to the Hub:
|
||||
|
||||
```python
|
||||
# assuming you have run the huggingface-cli login command in a terminal
|
||||
dataset.push_to_hub("name_of_your_dataset")
|
||||
|
||||
# if you want to push to a private repo, simply pass private=True:
|
||||
dataset.push_to_hub("name_of_your_dataset", private=True)
|
||||
```
|
||||
|
||||
Now the dataset is available for training by passing the dataset name to the `--dataset_name` argument:
|
||||
|
||||
```bash
|
||||
accelerate launch --mixed_precision="fp16" train_text_to_image.py \
|
||||
--pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5" \
|
||||
--dataset_name="name_of_your_dataset" \
|
||||
<other-arguments>
|
||||
```
|
||||
|
||||
## Next steps
|
||||
|
||||
Now that you've created a dataset, you can plug it into the `train_data_dir` (if your dataset is local) or `dataset_name` (if your dataset is on the Hub) arguments of a training script.
|
||||
|
||||
For your next steps, feel free to try and use your dataset to train a model for [unconditional generation](unconditional_training) or [text-to-image generation](text2image)!
|
||||
@@ -33,7 +33,13 @@ cd diffusers
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
Then cd into the [example folder](https://github.com/huggingface/diffusers/tree/main/examples/custom_diffusion)
|
||||
|
||||
```
|
||||
cd examples/custom_diffusion
|
||||
```
|
||||
|
||||
Now run
|
||||
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
@@ -61,7 +67,7 @@ write_basic_config()
|
||||
```
|
||||
### Cat example 😺
|
||||
|
||||
Now let's get our dataset. Download dataset from [here](https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip) and unzip it.
|
||||
Now let's get our dataset. Download dataset from [here](https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip) and unzip it. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
|
||||
|
||||
We also collect 200 real images using `clip-retrieval`, which are combined with the target images in the training dataset as a regularization. This prevents overfitting to the given target image. The following flags enable the regularization: `with_prior_preservation`, `real_prior` with `prior_loss_weight=1.`.
The `class_prompt` should be the same category name as the target image. The collected real images have text captions similar to the `class_prompt`. The retrieved images are saved in `class_data_dir`. You can disable `real_prior` to use generated images as regularization. To collect the real images, use this command first before training.
|
||||
@@ -73,6 +79,8 @@ python retrieve.py --class_prompt cat --class_data_dir real_reg/samples_cat --nu
|
||||
|
||||
**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**
|
||||
|
||||
The script creates and saves model checkpoints and a `pytorch_custom_diffusion_weights.bin` file in your repository.
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
||||
export OUTPUT_DIR="path-to-save-model"
|
||||
@@ -92,7 +100,8 @@ accelerate launch train_custom_diffusion.py \
|
||||
--lr_warmup_steps=0 \
|
||||
--max_train_steps=250 \
|
||||
--scale_lr --hflip \
|
||||
--modifier_token "<new1>"
|
||||
--modifier_token "<new1>" \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
**Use `--enable_xformers_memory_efficient_attention` for faster training with lower VRAM requirement (16GB per GPU). Follow [this guide](https://github.com/facebookresearch/xformers) for installation instructions.**
|
||||
@@ -124,7 +133,8 @@ accelerate launch train_custom_diffusion.py \
|
||||
--scale_lr --hflip \
|
||||
--modifier_token "<new1>" \
|
||||
--validation_prompt="<new1> cat sitting in a bucket" \
|
||||
--report_to="wandb"
|
||||
--report_to="wandb" \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/26ghrcau) where you can check out the intermediate results along with other training details.
|
||||
@@ -160,7 +170,8 @@ accelerate launch train_custom_diffusion.py \
|
||||
--max_train_steps=500 \
|
||||
--num_class_images=200 \
|
||||
--scale_lr --hflip \
|
||||
--modifier_token "<new1>+<new2>"
|
||||
--modifier_token "<new1>+<new2>" \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/3990tzkg) where you can check out the intermediate results along with other training details.
|
||||
@@ -199,7 +210,8 @@ accelerate launch train_custom_diffusion.py \
|
||||
--scale_lr --hflip --noaug \
|
||||
--freeze_model crossattn \
|
||||
--modifier_token "<new1>" \
|
||||
--enable_xformers_memory_efficient_attention
|
||||
--enable_xformers_memory_efficient_attention \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
## Inference
|
||||
|
||||
docs/source/en/training/distributed_inference.mdx (new file)
@@ -0,0 +1,91 @@
|
||||
# Distributed inference with multiple GPUs
|
||||
|
||||
On distributed setups, you can run inference across multiple GPUs with 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) or [PyTorch Distributed](https://pytorch.org/tutorials/beginner/dist_overview.html), which is useful for generating with multiple prompts in parallel.
|
||||
|
||||
This guide will show you how to use 🤗 Accelerate and PyTorch Distributed for distributed inference.
|
||||
|
||||
## 🤗 Accelerate
|
||||
|
||||
🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) is a library designed to make it easy to train or run inference across distributed setups. It simplifies the process of setting up the distributed environment, allowing you to focus on your PyTorch code.
|
||||
|
||||
To begin, create a Python file and initialize an [`accelerate.PartialState`] to create a distributed environment; your setup is automatically detected so you don't need to explicitly define the `rank` or `world_size`. Move the [`DiffusionPipeline`] to `distributed_state.device` to assign a GPU to each process.
|
||||
|
||||
Now use the [`~accelerate.PartialState.split_between_processes`] utility as a context manager to automatically distribute the prompts between the number of processes.
|
||||
|
||||
```py
|
||||
import torch

from accelerate import PartialState
from diffusers import DiffusionPipeline
|
||||
|
||||
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
|
||||
distributed_state = PartialState()
|
||||
pipeline.to(distributed_state.device)
|
||||
|
||||
with distributed_state.split_between_processes(["a dog", "a cat"]) as prompt:
|
||||
result = pipeline(prompt).images[0]
|
||||
result.save(f"result_{distributed_state.process_index}.png")
|
||||
```
|
||||
|
||||
Use the `--num_processes` argument to specify the number of GPUs to use, and call `accelerate launch` to run the script:
|
||||
|
||||
```bash
|
||||
accelerate launch run_distributed.py --num_processes=2
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
To learn more, take a look at the [Distributed Inference with 🤗 Accelerate](https://huggingface.co/docs/accelerate/en/usage_guides/distributed_inference#distributed-inference-with-accelerate) guide.
|
||||
|
||||
</Tip>
|
||||
|
||||
## PyTorch Distributed
|
||||
|
||||
PyTorch supports [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) which enables data parallelism.
|
||||
|
||||
To start, create a Python file and import `torch.distributed` and `torch.multiprocessing` to set up the distributed process group and to spawn the processes for inference on each GPU. You should also initialize a [`DiffusionPipeline`]:
|
||||
|
||||
```py
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
import torch.multiprocessing as mp
|
||||
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
sd = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
|
||||
```
|
||||
|
||||
You'll want to create a function to run inference; [`init_process_group`](https://pytorch.org/docs/stable/distributed.html?highlight=init_process_group#torch.distributed.init_process_group) handles creating a distributed environment with the type of backend to use, the `rank` of the current process, and the `world_size` or the number of processes participating. If you're running inference in parallel over 2 GPUs, then the `world_size` is 2.
|
||||
|
||||
Move the [`DiffusionPipeline`] to `rank` and use `get_rank` to assign a GPU to each process, where each process handles a different prompt:
|
||||
|
||||
```py
|
||||
def run_inference(rank, world_size):
|
||||
dist.init_process_group("nccl", rank=rank, world_size=world_size)
|
||||
|
||||
sd.to(rank)
|
||||
|
||||
if torch.distributed.get_rank() == 0:
|
||||
prompt = "a dog"
|
||||
elif torch.distributed.get_rank() == 1:
|
||||
prompt = "a cat"
|
||||
|
||||
image = sd(prompt).images[0]
|
||||
image.save(f"./{'_'.join(prompt)}.png")
|
||||
```
|
||||
|
||||
To run the distributed inference, call [`mp.spawn`](https://pytorch.org/docs/stable/multiprocessing.html#torch.multiprocessing.spawn) to run the `run_inference` function on the number of GPUs defined in `world_size`:
|
||||
|
||||
```py
|
||||
def main():
|
||||
world_size = 2
|
||||
mp.spawn(run_inference, args=(world_size,), nprocs=world_size, join=True)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
```
|
||||
|
||||
Once you've completed the inference script, use the `--nproc_per_node` argument to specify the number of GPUs to use and call `torchrun` to run the script:
|
||||
|
||||
```bash
|
||||
torchrun run_distributed.py --nproc_per_node=2
|
||||
```
|
||||
@@ -64,6 +64,8 @@ snapshot_download(
|
||||
)
|
||||
```
|
||||
|
||||
To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
|
||||
|
||||
## Finetuning
|
||||
|
||||
<Tip warning={true}>
|
||||
@@ -76,7 +78,7 @@ DreamBooth finetuning is very sensitive to hyperparameters and easy to overfit.
|
||||
<pt>
|
||||
Set the `INSTANCE_DIR` environment variable to the path of the directory containing the dog images.
|
||||
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`] argument. The `instance_prompt` argument is a text prompt that contains a unique identifier, such as `sks`, and the class the image belongs to, which in this example is `a photo of a sks dog`.
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
||||
@@ -98,7 +100,8 @@ accelerate launch train_dreambooth.py \
|
||||
--learning_rate=5e-6 \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--max_train_steps=400
|
||||
--max_train_steps=400 \
|
||||
--push_to_hub
|
||||
```
|
||||
</pt>
|
||||
<jax>
|
||||
@@ -110,7 +113,7 @@ Before running the script, make sure you have the requirements installed:
|
||||
pip install -U -r requirements.txt
|
||||
```
|
||||
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`] argument. The `instance_prompt` argument is a text prompt that contains a unique identifier, such as `sks`, and the class the image belongs to, which in this example is `a photo of a sks dog`.
|
||||
|
||||
Now you can launch the training script with the following command:
|
||||
|
||||
@@ -127,7 +130,8 @@ python train_dreambooth_flax.py \
|
||||
--resolution=512 \
|
||||
--train_batch_size=1 \
|
||||
--learning_rate=5e-6 \
|
||||
--max_train_steps=400
|
||||
--max_train_steps=400 \
|
||||
--push_to_hub
|
||||
```
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
@@ -161,7 +165,8 @@ accelerate launch train_dreambooth.py \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
--max_train_steps=800 \
|
||||
--push_to_hub
|
||||
```
|
||||
</pt>
|
||||
<jax>
|
||||
@@ -183,7 +188,8 @@ python train_dreambooth_flax.py \
|
||||
--train_batch_size=1 \
|
||||
--learning_rate=5e-6 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
--max_train_steps=800 \
|
||||
--push_to_hub
|
||||
```
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
@@ -219,13 +225,14 @@ accelerate launch train_dreambooth.py \
|
||||
--class_prompt="a photo of dog" \
|
||||
--resolution=512 \
|
||||
--train_batch_size=1 \
|
||||
--use_8bit_adam
|
||||
--use_8bit_adam \
|
||||
--gradient_checkpointing \
|
||||
--learning_rate=2e-6 \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
--max_train_steps=800 \
|
||||
--push_to_hub
|
||||
```
|
||||
</pt>
|
||||
<jax>
|
||||
@@ -248,7 +255,8 @@ python train_dreambooth_flax.py \
|
||||
--train_batch_size=1 \
|
||||
--learning_rate=2e-6 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
--max_train_steps=800 \
|
||||
--push_to_hub
|
||||
```
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
@@ -387,7 +395,8 @@ accelerate launch train_dreambooth.py \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
--max_train_steps=800 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
### 12GB GPU
|
||||
@@ -418,7 +427,8 @@ accelerate launch train_dreambooth.py \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
--max_train_steps=800 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
### 8 GB GPU
|
||||
@@ -464,7 +474,8 @@ accelerate launch train_dreambooth.py \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800 \
|
||||
--mixed_precision=fp16
|
||||
--mixed_precision=fp16 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
## Inference
|
||||
@@ -488,3 +499,207 @@ image.save("dog-bucket.png")
|
||||
```
|
||||
|
||||
You may also run inference from any of the [saved training checkpoints](#inference-from-a-saved-checkpoint).
|
||||
|
||||
## IF
|
||||
|
||||
You can use the LoRA and full DreamBooth scripts to train the text-to-image [IF model](https://huggingface.co/DeepFloyd/IF-I-XL-v1.0) and the stage II upscaler [IF model](https://huggingface.co/DeepFloyd/IF-II-L-v1.0).
|
||||
|
||||
Note that IF has a predicted variance, and our finetuning scripts only train the model's predicted error, so for finetuned IF models we switch to a fixed variance schedule. The full finetuning scripts will update the scheduler config for the full saved model. However, when loading saved LoRA weights, you must also update the pipeline's scheduler config.
|
||||
|
||||
```py
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
|
||||
|
||||
pipe.load_lora_weights("<lora weights path>")
|
||||
|
||||
# Update scheduler config to fixed variance schedule
|
||||
pipe.scheduler = pipe.scheduler.__class__.from_config(pipe.scheduler.config, variance_type="fixed_small")
|
||||
```
|
||||
|
||||
Additionally, a few alternative cli flags are needed for IF.
|
||||
|
||||
`--resolution=64`: IF is a pixel space diffusion model. In order to operate on un-compressed pixels, the input images are of a much smaller resolution.
|
||||
|
||||
`--pre_compute_text_embeddings`: IF uses [T5](https://huggingface.co/docs/transformers/model_doc/t5) for its text encoder. In order to save GPU memory, we pre compute all text embeddings and then de-allocate
|
||||
T5.
|
||||
|
||||
`--tokenizer_max_length=77`: T5 has a longer default text length, but the default IF encoding procedure uses a smaller number.
|
||||
|
||||
`--text_encoder_use_attention_mask`: T5 passes the attention mask to the text encoder.
|
||||
|
||||
### Tips and Tricks
|
||||
We find LoRA to be sufficient for finetuning the stage I model as the low resolution of the model makes representing finegrained detail hard regardless.
|
||||
|
||||
For common and/or not visually complex object concepts, you can get away with not finetuning the upscaler. Just be sure to adjust the prompt passed to the upscaler to remove the new token from the instance prompt, i.e. if your stage I prompt is "a sks dog", use "a dog" for your stage II prompt.
|
||||
|
||||
For finegrained detail like faces that aren't present in the original training set, we find that full finetuning of the stage II upscaler is better than
|
||||
LoRA finetuning stage II.
|
||||
|
||||
For finegrained detail like faces, we find that lower learning rates along with larger batch sizes work best.
|
||||
|
||||
For stage II, we find that lower learning rates are also needed.
|
||||
|
||||
We found experimentally that the DDPM scheduler, with its default larger number of denoising steps, sometimes works better than the DPM Solver scheduler used in the training scripts.
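A sketch of trying that swap at inference time (assuming `pipe` is the loaded IF stage I pipeline):

```py
from diffusers import DDPMScheduler

# Replace the scheduler while keeping the pipeline's existing scheduler config.
pipe.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)
```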
|
||||
|
||||
### Stage II additional validation images
|
||||
|
||||
The stage II validation requires images to upscale, so we can download a downsized version of the training set:
|
||||
|
||||
```py
|
||||
from huggingface_hub import snapshot_download
|
||||
|
||||
local_dir = "./dog_downsized"
|
||||
snapshot_download(
|
||||
"diffusers/dog-example-downsized",
|
||||
local_dir=local_dir,
|
||||
repo_type="dataset",
|
||||
ignore_patterns=".gitattributes",
|
||||
)
|
||||
```
|
||||
|
||||
### IF stage I LoRA Dreambooth
|
||||
This training configuration requires ~28 GB VRAM.
|
||||
|
||||
```sh
|
||||
export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0"
|
||||
export INSTANCE_DIR="dog"
|
||||
export OUTPUT_DIR="dreambooth_dog_lora"
|
||||
|
||||
accelerate launch train_dreambooth_lora.py \
|
||||
--report_to wandb \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--instance_prompt="a sks dog" \
|
||||
--resolution=64 \
|
||||
--train_batch_size=4 \
|
||||
--gradient_accumulation_steps=1 \
|
||||
--learning_rate=5e-6 \
|
||||
--scale_lr \
|
||||
--max_train_steps=1200 \
|
||||
--validation_prompt="a sks dog" \
|
||||
--validation_epochs=25 \
|
||||
--checkpointing_steps=100 \
|
||||
--pre_compute_text_embeddings \
|
||||
--tokenizer_max_length=77 \
|
||||
--text_encoder_use_attention_mask
|
||||
```
|
||||
|
||||
### IF stage II LoRA Dreambooth
|
||||
|
||||
`--validation_images`: These images are upscaled during validation steps.
|
||||
|
||||
`--class_labels_conditioning=timesteps`: Pass additional conditioning to the UNet needed for stage II.
|
||||
|
||||
`--learning_rate=1e-6`: Lower learning rate than stage I.
|
||||
|
||||
`--resolution=256`: The upscaler expects higher resolution inputs
|
||||
|
||||
```sh
|
||||
export MODEL_NAME="DeepFloyd/IF-II-L-v1.0"
|
||||
export INSTANCE_DIR="dog"
|
||||
export OUTPUT_DIR="dreambooth_dog_upscale"
|
||||
export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png"
|
||||
|
||||
python train_dreambooth_lora.py \
|
||||
--report_to wandb \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--instance_prompt="a sks dog" \
|
||||
--resolution=256 \
|
||||
--train_batch_size=4 \
|
||||
--gradient_accumulation_steps=1 \
|
||||
--learning_rate=1e-6 \
|
||||
--max_train_steps=2000 \
|
||||
--validation_prompt="a sks dog" \
|
||||
--validation_epochs=100 \
|
||||
--checkpointing_steps=500 \
|
||||
--pre_compute_text_embeddings \
|
||||
--tokenizer_max_length=77 \
|
||||
--text_encoder_use_attention_mask \
|
||||
--validation_images $VALIDATION_IMAGES \
|
||||
--class_labels_conditioning=timesteps
|
||||
```
|
||||
|
||||
### IF Stage I Full Dreambooth
|
||||
`--skip_save_text_encoder`: When training the full model, this will skip saving the entire T5 with the finetuned model. You can still load the pipeline
|
||||
with a T5 loaded from the original model.
|
||||
|
||||
`--use_8bit_adam`: Due to the size of the optimizer states, we recommend training the full XL IF model with 8-bit Adam.
|
||||
|
||||
`--learning_rate=1e-7`: For full dreambooth, IF requires very low learning rates. With higher learning rates model quality will degrade. Note that it is
|
||||
likely the learning rate can be increased with larger batch sizes.
|
||||
|
||||
Using 8-bit Adam and a batch size of 4, the model can be trained in ~48 GB VRAM.
|
||||
|
||||
```sh
|
||||
export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0"
|
||||
|
||||
export INSTANCE_DIR="dog"
|
||||
export OUTPUT_DIR="dreambooth_if"
|
||||
|
||||
accelerate launch train_dreambooth.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--instance_prompt="a photo of sks dog" \
|
||||
--resolution=64 \
|
||||
--train_batch_size=4 \
|
||||
--gradient_accumulation_steps=1 \
|
||||
--learning_rate=1e-7 \
|
||||
--max_train_steps=150 \
|
||||
--validation_prompt "a photo of sks dog" \
|
||||
--validation_steps 25 \
|
||||
--text_encoder_use_attention_mask \
|
||||
--tokenizer_max_length 77 \
|
||||
--pre_compute_text_embeddings \
|
||||
--use_8bit_adam \
|
||||
--set_grads_to_none \
|
||||
--skip_save_text_encoder \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
### IF Stage II Full Dreambooth
|
||||
|
||||
`--learning_rate=5e-6`: With a smaller effective batch size of 4, we found that we required learning rates as low as 1e-8.
|
||||
|
||||
`--resolution=256`: The upscaler expects higher resolution inputs.
|
||||
|
||||
`--train_batch_size=2` and `--gradient_accumulation_steps=6`: We found that full training of stage II, particularly with faces, required large effective batch sizes.
|
||||
|
||||
```sh
|
||||
export MODEL_NAME="DeepFloyd/IF-II-L-v1.0"
|
||||
export INSTANCE_DIR="dog"
|
||||
export OUTPUT_DIR="dreambooth_dog_upscale"
|
||||
export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png"
|
||||
|
||||
accelerate launch train_dreambooth.py \
|
||||
--report_to wandb \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--instance_prompt="a sks dog" \
|
||||
--resolution=256 \
|
||||
--train_batch_size=2 \
|
||||
--gradient_accumulation_steps=6 \
|
||||
--learning_rate=5e-6 \
|
||||
--max_train_steps=2000 \
|
||||
--validation_prompt="a sks dog" \
|
||||
--validation_steps=150 \
|
||||
--checkpointing_steps=500 \
|
||||
--pre_compute_text_embeddings \
|
||||
--tokenizer_max_length=77 \
|
||||
--text_encoder_use_attention_mask \
|
||||
--validation_images $VALIDATION_IMAGES \
|
||||
--class_labels_conditioning timesteps \
|
||||
--push_to_hub
|
||||
```
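Once stage II finetuning finishes, you can upscale one of the downsized validation images with the saved pipeline. A rough sketch, assuming the output directory above contains the full finetuned pipeline:

```py
import torch
from diffusers import IFSuperResolutionPipeline
from diffusers.utils import load_image

pipe = IFSuperResolutionPipeline.from_pretrained(
    "dreambooth_dog_upscale", torch_dtype=torch.float16
).to("cuda")

low_res = load_image("dog_downsized/image_1.png")  # 64x64 validation image
image = pipe(prompt="a sks dog", image=low_res).images[0]
```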
|
||||
|
||||
@@ -24,7 +24,7 @@ The output is an "edited" image that reflects the edit instruction applied on th
|
||||
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/output-gs%407-igs%401-steps%4050.png" alt="instructpix2pix-output" width=600/>
|
||||
</p>
|
||||
|
||||
The `train_instruct_pix2pix.py` script shows how to implement the training procedure and adapt it for Stable Diffusion.
|
||||
The `train_instruct_pix2pix.py` script (you can find it [here](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py)) shows how to implement the training procedure and adapt it for Stable Diffusion.
|
||||
|
||||
***Disclaimer: Even though `train_instruct_pix2pix.py` implements the InstructPix2Pix training procedure while being faithful to the [original implementation](https://github.com/timothybrooks/instruct-pix2pix), we have only tested it on a [small-scale dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples). This can impact the end results. For better results, we recommend longer training runs with a larger dataset. [Here](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) you can find a large dataset for InstructPix2Pix training.***
|
||||
@@ -44,7 +44,12 @@ cd diffusers
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
Then cd in the example folder and run
|
||||
Then cd in the example folder
|
||||
```bash
|
||||
cd examples/instruct_pix2pix
|
||||
```
|
||||
|
||||
Now run
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
@@ -72,16 +77,16 @@ write_basic_config()
|
||||
### Toy example
|
||||
|
||||
As mentioned before, we'll use a [small toy dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) for training. The dataset
|
||||
is a smaller version of the [original dataset](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) used in the InstructPix2Pix paper.
|
||||
is a smaller version of the [original dataset](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) used in the InstructPix2Pix paper. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
|
||||
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`~diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path`] argument. You'll also need to specify the dataset name in `DATASET_ID`:
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument. You'll also need to specify the dataset name in `DATASET_ID`:
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
|
||||
export DATASET_ID="fusing/instructpix2pix-1000-samples"
|
||||
```
|
||||
|
||||
Now, we can launch training:
|
||||
Now, we can launch training. The script saves all the components (`feature_extractor`, `scheduler`, `text_encoder`, `unet`, etc) in a subfolder in your repository.
|
||||
|
||||
```bash
|
||||
accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
|
||||
@@ -95,7 +100,8 @@ accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
|
||||
--learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \
|
||||
--conditioning_dropout_prob=0.05 \
|
||||
--mixed_precision=fp16 \
|
||||
--seed=42
|
||||
--seed=42 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
Additionally, we support performing validation inference to monitor training progress
|
||||
@@ -116,7 +122,8 @@ accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
|
||||
--val_image_url="https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" \
|
||||
--validation_prompt="make the mountains snowy" \
|
||||
--seed=42 \
|
||||
--report_to=wandb
|
||||
--report_to=wandb \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
We recommend this type of validation as it can be useful for model debugging. Note that you need `wandb` installed to use this. You can install `wandb` by running `pip install wandb`.
|
||||
@@ -143,7 +150,8 @@ accelerate launch --mixed_precision="fp16" --multi_gpu train_instruct_pix2pix.py
|
||||
--learning_rate=5e-05 --lr_warmup_steps=0 \
|
||||
--conditioning_dropout_prob=0.05 \
|
||||
--mixed_precision=fp16 \
|
||||
--seed=42
|
||||
--seed=42 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
## Inference
|
||||
@@ -199,3 +207,5 @@ speed and quality during performance:
|
||||
|
||||
Particularly, `image_guidance_scale` and `guidance_scale` can have a profound impact
|
||||
on the generated ("edited") image (see [here](https://twitter.com/RisingSayak/status/1628392199196151808?s=20) for an example).
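As a rough sketch of how these two knobs enter inference (the model path is a placeholder for your own finetuned checkpoint):

```py
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "path/to/instruct-pix2pix-model", torch_dtype=torch.float16
).to("cuda")

image = load_image("https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png")

# image_guidance_scale pulls the output toward the input image,
# guidance_scale pulls it toward the edit instruction
edited = pipe(
    "make the mountains snowy",
    image=image,
    num_inference_steps=20,
    image_guidance_scale=1.5,
    guidance_scale=7,
).images[0]
```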
|
||||
|
||||
If you're looking for some interesting ways to use the InstructPix2Pix training methodology, we welcome you to check out this blog post: [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd).
|
||||
@@ -17,8 +17,7 @@ specific language governing permissions and limitations under the License.
|
||||
<Tip warning={true}>
|
||||
|
||||
Currently, LoRA is only supported for the attention layers of the [`UNet2DConditionModel`]. We also
|
||||
support LoRA fine-tuning of the text encoder for DreamBooth in a limited capacity. For more details on how we support
|
||||
LoRA fine-tuning of the text encoder, refer to the discussion on [this PR](https://github.com/huggingface/diffusers/pull/2918).
|
||||
support fine-tuning the text encoder for DreamBooth with LoRA in a limited capacity. Fine-tuning the text encoder for DreamBooth generally yields better results, but it can increase compute usage.
|
||||
|
||||
</Tip>
|
||||
|
||||
@@ -52,7 +51,7 @@ Finetuning a model like Stable Diffusion, which has billions of parameters, can
|
||||
|
||||
Let's finetune [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) on the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset to generate your own Pokémon.
|
||||
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`~diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path`] argument. You'll also need to set the `DATASET_NAME` environment variable to the name of the dataset you want to train on.
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument. You'll also need to set the `DATASET_NAME` environment variable to the name of the dataset you want to train on. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
|
||||
|
||||
The `OUTPUT_DIR` and `HUB_MODEL_ID` variables are optional and specify where to save the model to on the Hub:
|
||||
|
||||
@@ -69,7 +68,7 @@ There are some flags to be aware of before you start training:
|
||||
* `--report_to=wandb` reports and logs the training results to your Weights & Biases dashboard (as an example, take a look at this [report](https://wandb.ai/pcuenq/text2image-fine-tune/runs/b4k1w0tn?workspace=user-pcuenq)).
|
||||
* `--learning_rate=1e-04`, you can afford to use a higher learning rate than you normally would with LoRA.
|
||||
|
||||
Now you're ready to launch the training (you can find the full training script [here](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py)):
|
||||
Now you're ready to launch the training (you can find the full training script [here](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py)). Training takes about 5 hours on a 2080 Ti GPU with 11GB of RAM, and it'll create and save model checkpoints and the `pytorch_lora_weights` in your repository.
|
||||
|
||||
```bash
|
||||
accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \
|
||||
@@ -115,7 +114,7 @@ Load the LoRA weights from your finetuned model *on top of the base model weight
|
||||
</Tip>
|
||||
|
||||
```py
|
||||
>>> pipe.unet.load_attn_procs(model_path)
|
||||
>>> pipe.unet.load_attn_procs(lora_model_path)
|
||||
>>> pipe.to("cuda")
|
||||
# use half the weights from the LoRA finetuned model and half the weights from the base model
|
||||
|
||||
@@ -128,6 +127,26 @@ Load the LoRA weights from your finetuned model *on top of the base model weight
|
||||
>>> image.save("blue_pokemon.png")
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
If you are loading the LoRA parameters from the Hub and if the Hub repository has
|
||||
a `base_model` tag (such as [this](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4/blob/main/README.md?code=true#L4)), then
|
||||
you can do:
|
||||
|
||||
```py
|
||||
from huggingface_hub.repocard import RepoCard
|
||||
|
||||
lora_model_id = "sayakpaul/sd-model-finetuned-lora-t4"
|
||||
card = RepoCard.load(lora_model_id)
|
||||
base_model_id = card.data.to_dict()["base_model"]
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
|
||||
...
|
||||
```
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
## DreamBooth
|
||||
|
||||
[DreamBooth](https://arxiv.org/abs/2208.12242) is a finetuning technique for personalizing a text-to-image model like Stable Diffusion to generate photorealistic images of a subject in different contexts, given a few images of the subject. However, DreamBooth is very sensitive to hyperparameters and it is easy to overfit. Some important hyperparameters to consider include those that affect the training time (learning rate, number of training steps), and inference time (number of steps, scheduler type).
|
||||
@@ -140,9 +159,9 @@ Load the LoRA weights from your finetuned model *on top of the base model weight
|
||||
|
||||
### Training[[dreambooth-training]]
|
||||
|
||||
Let's finetune [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) with DreamBooth and LoRA with some 🐶 [dog images](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ). Download and save these images to a directory.
|
||||
Let's finetune [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) with DreamBooth and LoRA with some 🐶 [dog images](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ). Download and save these images to a directory. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
|
||||
|
||||
To start, specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`~diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path`] argument. You'll also need to set `INSTANCE_DIR` to the path of the directory containing the images.
|
||||
To start, specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument. You'll also need to set `INSTANCE_DIR` to the path of the directory containing the images.
|
||||
|
||||
The `OUTPUT_DIR` variable is optional and specifies where to save the model on the Hub:
|
||||
|
||||
@@ -158,7 +177,11 @@ There are some flags to be aware of before you start training:
|
||||
* `--report_to=wandb` reports and logs the training results to your Weights & Biases dashboard (as an example, take a look at this [report](https://wandb.ai/pcuenq/text2image-fine-tune/runs/b4k1w0tn?workspace=user-pcuenq)).
|
||||
* `--learning_rate=1e-04`, you can afford to use a higher learning rate than you normally would with LoRA.
|
||||
|
||||
Now you're ready to launch the training (you can find the full training script [here](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py)):
|
||||
Now you're ready to launch the training (you can find the full training script [here](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py)). The script creates and saves model checkpoints and the `pytorch_lora_weights.bin` file in your repository.
|
||||
|
||||
It's also possible to additionally fine-tune the text encoder with LoRA. This, in most cases, leads
|
||||
to better results with a slight increase in the compute. To allow fine-tuning the text encoder with LoRA,
|
||||
specify the `--train_text_encoder` while launching the `train_dreambooth_lora.py` script.
|
||||
|
||||
```bash
|
||||
accelerate launch train_dreambooth_lora.py \
|
||||
@@ -179,12 +202,7 @@ accelerate launch train_dreambooth_lora.py \
|
||||
--validation_epochs=50 \
|
||||
--seed="0" \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
It's also possible to additionally fine-tune the text encoder with LoRA. This, in most cases, leads
|
||||
to better results with a slight increase in the compute. To allow fine-tuning the text encoder with LoRA,
|
||||
specify the `--train_text_encoder` while launching the `train_dreambooth_lora.py` script.
|
||||
|
||||
```
|
||||
|
||||
### Inference[[dreambooth-inference]]
|
||||
|
||||
@@ -208,7 +226,7 @@ Load the LoRA weights from your finetuned DreamBooth model *on top of the base m
|
||||
</Tip>
|
||||
|
||||
```py
|
||||
>>> pipe.unet.load_attn_procs(model_path)
|
||||
>>> pipe.unet.load_attn_procs(lora_model_path)
|
||||
>>> pipe.to("cuda")
|
||||
# use half the weights from the LoRA finetuned model and half the weights from the base model
|
||||
|
||||
@@ -222,4 +240,115 @@ Load the LoRA weights from your finetuned DreamBooth model *on top of the base m
|
||||
|
||||
>>> image = pipe("A picture of a sks dog in a bucket.", num_inference_steps=25, guidance_scale=7.5).images[0]
|
||||
>>> image.save("bucket-dog.png")
|
||||
```
|
||||
|
||||
If you used `--train_text_encoder` during training, then use `pipe.load_lora_weights()` to load the LoRA
|
||||
weights. For example:
|
||||
|
||||
```python
|
||||
from huggingface_hub.repocard import RepoCard
|
||||
from diffusers import StableDiffusionPipeline
|
||||
import torch
|
||||
|
||||
lora_model_id = "sayakpaul/dreambooth-text-encoder-test"
|
||||
card = RepoCard.load(lora_model_id)
|
||||
base_model_id = card.data.to_dict()["base_model"]
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
|
||||
pipe = pipe.to("cuda")
|
||||
pipe.load_lora_weights(lora_model_id)
|
||||
image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
If your LoRA parameters involve the UNet as well as the Text Encoder, then passing
|
||||
`cross_attention_kwargs={"scale": 0.5}` will apply the `scale` value to both the UNet
|
||||
and the Text Encoder.
|
||||
|
||||
</Tip>
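For instance, continuing the inference example above, the scale can be passed directly at call time:

```py
# blend 50% of the LoRA weights with the base model weights
image = pipe(
    "A picture of a sks dog in a bucket",
    num_inference_steps=25,
    cross_attention_kwargs={"scale": 0.5},
).images[0]
```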
|
||||
|
||||
Note that the use of [`~diffusers.loaders.LoraLoaderMixin.load_lora_weights`] is preferred to [`~diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs`] for loading LoRA parameters. This is because
|
||||
[`~diffusers.loaders.LoraLoaderMixin.load_lora_weights`] can handle the following situations:
|
||||
|
||||
* LoRA parameters that don't have separate identifiers for the UNet and the text encoder (such as [`"patrickvonplaten/lora_dreambooth_dog_example"`](https://huggingface.co/patrickvonplaten/lora_dreambooth_dog_example)). So, you can just do:
|
||||
|
||||
```py
|
||||
pipe.load_lora_weights(lora_model_path)
|
||||
```
|
||||
|
||||
* LoRA parameters that have separate identifiers for the UNet and the text encoder such as: [`"sayakpaul/dreambooth"`](https://huggingface.co/sayakpaul/dreambooth).
|
||||
|
||||
**Note** that it is possible to provide a local directory path to [`~diffusers.loaders.LoraLoaderMixin.load_lora_weights`] as well as [`~diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs`]. To know about the supported inputs,
|
||||
refer to the respective docstrings.
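For example, a local directory produced by the DreamBooth LoRA script can be loaded like this (the path is illustrative):

```py
# load LoRA parameters from a local folder; weight_name points at the serialized file
pipe.load_lora_weights("./dreambooth_dog_lora", weight_name="pytorch_lora_weights.bin")
```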
|
||||
|
||||
## Supporting A1111 themed LoRA checkpoints from Diffusers
|
||||
|
||||
To provide seamless interoperability with A1111 to our users, we support loading A1111 formatted
|
||||
LoRA checkpoints using [`~diffusers.loaders.LoraLoaderMixin.load_lora_weights`] in a limited capacity.
|
||||
In this section, we explain how to load an A1111 formatted LoRA checkpoint from [CivitAI](https://civitai.com/)
|
||||
in Diffusers and perform inference with it.
|
||||
|
||||
First, download a checkpoint. We'll use
|
||||
[this one](https://civitai.com/models/13239/light-and-shadow) for demonstration purposes.
|
||||
|
||||
```bash
|
||||
wget https://civitai.com/api/download/models/15603 -O light_and_shadow.safetensors
|
||||
```
|
||||
|
||||
Next, we initialize a [`~DiffusionPipeline`]:
|
||||
|
||||
```python
|
||||
import torch
|
||||
|
||||
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
|
||||
|
||||
pipeline = StableDiffusionPipeline.from_pretrained(
|
||||
"gsdf/Counterfeit-V2.5", torch_dtype=torch.float16, safety_checker=None
|
||||
).to("cuda")
|
||||
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
|
||||
pipeline.scheduler.config, use_karras_sigmas=True
|
||||
)
|
||||
```
|
||||
|
||||
We then load the checkpoint downloaded from CivitAI:
|
||||
|
||||
```python
|
||||
pipeline.load_lora_weights(".", weight_name="light_and_shadow.safetensors")
|
||||
```
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
If you're loading a checkpoint in the `safetensors` format, please ensure you have `safetensors` installed.
|
||||
|
||||
</Tip>
|
||||
|
||||
And then it's time to run inference:
|
||||
|
||||
```python
|
||||
prompt = "masterpiece, best quality, 1girl, at dusk"
|
||||
negative_prompt = ("(low quality, worst quality:1.4), (bad anatomy), (inaccurate limb:1.2), "
|
||||
"bad composition, inaccurate eyes, extra digit, fewer digits, (extra arms:1.2), large breasts")
|
||||
|
||||
images = pipeline(prompt=prompt,
|
||||
negative_prompt=negative_prompt,
|
||||
width=512,
|
||||
height=768,
|
||||
num_inference_steps=15,
|
||||
num_images_per_prompt=4,
|
||||
generator=torch.manual_seed(0)
|
||||
).images
|
||||
```
|
||||
|
||||
Below is a comparison between the LoRA and the non-LoRA results:
|
||||
|
||||

|
||||
|
||||
If you have a similar checkpoint stored on the Hugging Face Hub, you can load it directly with [`~diffusers.loaders.LoraLoaderMixin.load_lora_weights`] like so:
|
||||
|
||||
```python
|
||||
lora_model_id = "sayakpaul/civitai-light-shadow-lora"
|
||||
lora_filename = "light_and_shadow.safetensors"
|
||||
pipeline.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
||||
```
|
||||
@@ -74,15 +74,27 @@ To load a checkpoint to resume training, pass the argument `--resume_from_checkp
|
||||
<pt>
|
||||
Launch the [PyTorch training script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) for a fine-tuning run on the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset like this.
|
||||
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`~diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path`] argument.
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument.
|
||||
|
||||
<literalinclude>
|
||||
{"path": "../../../../examples/text_to_image/README.md",
|
||||
"language": "bash",
|
||||
"start-after": "accelerate_snippet_start",
|
||||
"end-before": "accelerate_snippet_end",
|
||||
"dedent": 0}
|
||||
</literalinclude>
|
||||
```bash
|
||||
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
||||
export dataset_name="lambdalabs/pokemon-blip-captions"
|
||||
|
||||
accelerate launch --mixed_precision="fp16" train_text_to_image.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--dataset_name=$dataset_name \
|
||||
--use_ema \
|
||||
--resolution=512 --center_crop --random_flip \
|
||||
--train_batch_size=1 \
|
||||
--gradient_accumulation_steps=4 \
|
||||
--gradient_checkpointing \
|
||||
--max_train_steps=15000 \
|
||||
--learning_rate=1e-05 \
|
||||
--max_grad_norm=1 \
|
||||
--lr_scheduler="constant" --lr_warmup_steps=0 \
|
||||
--output_dir="sd-pokemon-model" \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
To finetune on your own dataset, prepare the dataset according to the format required by 🤗 [Datasets](https://huggingface.co/docs/datasets/index). You can [upload your dataset to the Hub](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub), or you can [prepare a local folder with your files](https://huggingface.co/docs/datasets/image_dataset#imagefolder).
|
||||
|
||||
@@ -105,8 +117,10 @@ accelerate launch train_text_to_image.py \
|
||||
--max_train_steps=15000 \
|
||||
--learning_rate=1e-05 \
|
||||
--max_grad_norm=1 \
|
||||
--lr_scheduler="constant" --lr_warmup_steps=0 \
|
||||
--output_dir=${OUTPUT_DIR}
|
||||
--lr_scheduler="constant"
|
||||
--lr_warmup_steps=0 \
|
||||
--output_dir=${OUTPUT_DIR} \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
#### Training with multiple GPUs
|
||||
@@ -129,8 +143,10 @@ accelerate launch --mixed_precision="fp16" --multi_gpu train_text_to_image.py \
|
||||
--max_train_steps=15000 \
|
||||
--learning_rate=1e-05 \
|
||||
--max_grad_norm=1 \
|
||||
--lr_scheduler="constant" --lr_warmup_steps=0 \
|
||||
--output_dir="sd-pokemon-model"
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--output_dir="sd-pokemon-model" \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
</pt>
|
||||
@@ -143,7 +159,7 @@ Before running the script, make sure you have the requirements installed:
|
||||
pip install -U -r requirements_flax.txt
|
||||
```
|
||||
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`~diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path`] argument.
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument.
|
||||
|
||||
Now you can launch the [Flax training script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_flax.py) like this:
|
||||
|
||||
@@ -159,7 +175,8 @@ python train_text_to_image_flax.py \
|
||||
--max_train_steps=15000 \
|
||||
--learning_rate=1e-05 \
|
||||
--max_grad_norm=1 \
|
||||
--output_dir="sd-pokemon-model"
|
||||
--output_dir="sd-pokemon-model" \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
To finetune on your own dataset, prepare the dataset according to the format required by 🤗 [Datasets](https://huggingface.co/docs/datasets/index). You can [upload your dataset to the Hub](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub), or you can [prepare a local folder with your files](https://huggingface.co/docs/datasets/image_dataset#imagefolder).
|
||||
@@ -179,7 +196,8 @@ python train_text_to_image_flax.py \
|
||||
--max_train_steps=15000 \
|
||||
--learning_rate=1e-05 \
|
||||
--max_grad_norm=1 \
|
||||
--output_dir="sd-pokemon-model"
|
||||
--output_dir="sd-pokemon-model" \
|
||||
--push_to_hub
|
||||
```
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
|
||||
@@ -81,7 +81,7 @@ To resume training from a saved checkpoint, pass the following argument to the t
|
||||
|
||||
## Finetuning
|
||||
|
||||
For your training dataset, download these [images of a cat toy](https://huggingface.co/datasets/diffusers/cat_toy_example) and store them in a directory:
|
||||
For your training dataset, download these [images of a cat toy](https://huggingface.co/datasets/diffusers/cat_toy_example) and store them in a directory. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
|
||||
|
||||
```py
|
||||
from huggingface_hub import snapshot_download
|
||||
@@ -92,9 +92,9 @@ snapshot_download(
|
||||
)
|
||||
```
|
||||
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`~diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path`] argument, and the `DATA_DIR` environment variable to the path of the directory containing the images.
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument, and the `DATA_DIR` environment variable to the path of the directory containing the images.
|
||||
|
||||
Now you can launch the [training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py):
|
||||
Now you can launch the [training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py). The script creates and saves the following files to your repository: `learned_embeds.bin`, `token_identifier.txt`, and `type_of_concept.txt`.
|
||||
|
||||
<Tip>
|
||||
|
||||
@@ -120,7 +120,8 @@ accelerate launch textual_inversion.py \
|
||||
--learning_rate=5.0e-04 --scale_lr \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--output_dir="textual_inversion_cat"
|
||||
--output_dir="textual_inversion_cat" \
|
||||
--push_to_hub
|
||||
```
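Once training finishes, one way to try out the learned concept is to load the saved embeddings on top of the base model. A minimal sketch, assuming the run above used `runwayml/stable-diffusion-v1-5` as the base model and that the output directory contains `learned_embeds.bin` with the `<cat-toy>` placeholder token:

```py
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.load_textual_inversion("textual_inversion_cat")  # directory containing learned_embeds.bin

image = pipe("A <cat-toy> backpack", num_inference_steps=50).images[0]
```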
|
||||
|
||||
<Tip>
|
||||
@@ -144,7 +145,7 @@ Before you begin, make sure you install the Flax specific dependencies:
|
||||
pip install -U -r requirements_flax.txt
|
||||
```
|
||||
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`~diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path`] argument.
|
||||
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument.
|
||||
|
||||
Then you can launch the [training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion_flax.py):
|
||||
|
||||
@@ -161,7 +162,8 @@ python textual_inversion_flax.py \
|
||||
--train_batch_size=1 \
|
||||
--max_train_steps=3000 \
|
||||
--learning_rate=5.0e-04 --scale_lr \
|
||||
--output_dir="textual_inversion_cat"
|
||||
--output_dir="textual_inversion_cat" \
|
||||
--push_to_hub
|
||||
```
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
@@ -245,7 +247,7 @@ from flax.training.common_utils import shard
|
||||
from diffusers import FlaxStableDiffusionPipeline
|
||||
|
||||
model_path = "path-to-your-trained-model"
|
||||
pipe, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)
|
||||
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)
|
||||
|
||||
prompt = "A <cat-toy> backpack"
|
||||
prng_seed = jax.random.PRNGKey(0)
|
||||
|
||||
@@ -74,7 +74,9 @@ The full training state is saved in a subfolder in the `output_dir` every 500 st
|
||||
|
||||
## Finetuning
|
||||
|
||||
You're ready to launch the [training script](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py) now! Specify the dataset name to finetune on with the `--dataset_name` argument and then save it to the path in `--output_dir`.
|
||||
You're ready to launch the [training script](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py) now! Specify the dataset name to finetune on with the `--dataset_name` argument and then save it to the path in `--output_dir`. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
|
||||
|
||||
The training script creates and saves a `diffusion_pytorch_model.bin` file in your repository.
|
||||
|
||||
<Tip>
|
||||
|
||||
@@ -139,83 +141,6 @@ accelerate launch --mixed_precision="fp16" --multi_gpu train_unconditional.py \
|
||||
--learning_rate=1e-4 \
|
||||
--lr_warmup_steps=500 \
|
||||
--mixed_precision="fp16" \
|
||||
--logger="wandb"
|
||||
```
|
||||
|
||||
## Finetuning with your own data
|
||||
|
||||
There are two ways to finetune a model on your own dataset:
|
||||
|
||||
- provide your own folder of images to the `--train_data_dir` argument
|
||||
- upload your dataset to the Hub and pass the dataset repository id to the `--dataset_name` argument.
|
||||
|
||||
<Tip>
|
||||
|
||||
💡 Learn more about how to create an image dataset for training in the [Create an image dataset](https://huggingface.co/docs/datasets/image_dataset) guide.
|
||||
|
||||
</Tip>
|
||||
|
||||
Below, we explain both in more detail.
|
||||
|
||||
### Provide the dataset as a folder
|
||||
|
||||
If you provide your own dataset as a folder, the script expects the following directory structure:
|
||||
|
||||
```bash
|
||||
data_dir/xxx.png
|
||||
data_dir/xxy.png
|
||||
data_dir/[...]/xxz.png
|
||||
```
|
||||
|
||||
Pass the path to the folder containing the images to the `--train_data_dir` argument and launch the training:
|
||||
|
||||
```bash
|
||||
accelerate launch train_unconditional.py \
|
||||
--train_data_dir <path-to-train-directory> \
|
||||
<other-arguments>
|
||||
```
|
||||
|
||||
Internally, the script uses the [`ImageFolder`](https://huggingface.co/docs/datasets/image_load#imagefolder) feature to automatically build a dataset from the folder.
|
||||
|
||||
### Upload your data to the Hub
|
||||
|
||||
<Tip>
|
||||
|
||||
💡 For more details and context about creating and uploading a dataset to the Hub, take a look at the [Image search with 🤗 Datasets](https://huggingface.co/blog/image-search-datasets) post.
|
||||
|
||||
</Tip>
|
||||
|
||||
To upload your dataset to the Hub, you can start by creating one with the [`ImageFolder`](https://huggingface.co/docs/datasets/image_load#imagefolder) feature, which creates an `image` column containing the PIL-encoded images, from 🤗 Datasets:
|
||||
|
||||
```python
|
||||
from datasets import load_dataset
|
||||
|
||||
# example 1: local folder
|
||||
dataset = load_dataset("imagefolder", data_dir="path_to_your_folder")
|
||||
|
||||
# example 2: local files (supported formats are tar, gzip, zip, xz, rar, zstd)
|
||||
dataset = load_dataset("imagefolder", data_files="path_to_zip_file")
|
||||
|
||||
# example 3: remote files (supported formats are tar, gzip, zip, xz, rar, zstd)
|
||||
dataset = load_dataset(
|
||||
"imagefolder",
|
||||
data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip",
|
||||
)
|
||||
|
||||
# example 4: providing several splits
|
||||
dataset = load_dataset(
|
||||
"imagefolder", data_files={"train": ["path/to/file1", "path/to/file2"], "test": ["path/to/file3", "path/to/file4"]}
|
||||
)
|
||||
```
|
||||
|
||||
Then you can use the [`~datasets.Dataset.push_to_hub`] method to upload it to the Hub:
|
||||
|
||||
```python
|
||||
# assuming you have run the huggingface-cli login command in a terminal
|
||||
dataset.push_to_hub("name_of_your_dataset")
|
||||
|
||||
# if you want to push to a private repo, simply pass private=True:
|
||||
dataset.push_to_hub("name_of_your_dataset", private=True)
|
||||
```
|
||||
|
||||
Now train your model by simply setting the `--dataset_name` argument to the name of your dataset on the Hub.
|
||||
--logger="wandb" \
|
||||
--push_to_hub
|
||||
```
|
||||
@@ -407,9 +407,9 @@ Once training is complete, take a look at the final 🦋 images 🦋 generated b
|
||||
|
||||
## Next steps
|
||||
|
||||
Unconditional image generation is one example of a task that can be trained. You can explore other tasks and training techniques by visiting the [🧨 Diffusers Training Examples](./training/overview) page. Here are some examples of what you can learn:
|
||||
Unconditional image generation is one example of a task that can be trained. You can explore other tasks and training techniques by visiting the [🧨 Diffusers Training Examples](../training/overview) page. Here are some examples of what you can learn:
|
||||
|
||||
* [Textual Inversion](./training/text_inversion), an algorithm that teaches a model a specific visual concept and integrates it into the generated image.
|
||||
* [DreamBooth](./training/dreambooth), a technique for generating personalized images of a subject given several input images of the subject.
|
||||
* [Guide](./training/text2image) to finetuning a Stable Diffusion model on your own dataset.
|
||||
* [Guide](./training/lora) to using LoRA, a memory-efficient technique for finetuning really large models faster.
|
||||
* [Textual Inversion](../training/text_inversion), an algorithm that teaches a model a specific visual concept and integrates it into the generated image.
|
||||
* [DreamBooth](../training/dreambooth), a technique for generating personalized images of a subject given several input images of the subject.
|
||||
* [Guide](../training/text2image) to finetuning a Stable Diffusion model on your own dataset.
|
||||
* [Guide](../training/lora) to using LoRA, a memory-efficient technique for finetuning really large models faster.
|
||||
|
||||
@@ -20,12 +20,12 @@ The [`DiffusionPipeline`] is the easiest way to use a pre-trained diffusion syst
|
||||
|
||||
Start by creating an instance of [`DiffusionPipeline`] and specify which pipeline [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) you would like to download.
|
||||
|
||||
In this guide, you'll use [`DiffusionPipeline`] for text-to-image generation with [Latent Diffusion](https://huggingface.co/CompVis/ldm-text2im-large-256):
|
||||
In this guide, you'll use [`DiffusionPipeline`] for text-to-image generation with [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5):
|
||||
|
||||
```python
|
||||
>>> from diffusers import DiffusionPipeline
|
||||
|
||||
>>> generator = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
|
||||
>>> generator = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
|
||||
```
|
||||
|
||||
The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components.
|
||||
|
||||
@@ -37,6 +37,28 @@ Unless otherwise mentioned, these are techniques that work with existing models
|
||||
9. [Textual Inversion](#textual-inversion)
|
||||
10. [ControlNet](#controlnet)
|
||||
11. [Prompt Weighting](#prompt-weighting)
|
||||
12. [Custom Diffusion](#custom-diffusion)
|
||||
13. [Model Editing](#model-editing)
|
||||
14. [DiffEdit](#diffedit)
|
||||
|
||||
For convenience, we provide a table to denote which methods are inference-only and which require fine-tuning/training.
|
||||
|
||||
| **Method** | **Inference only** | **Requires training /<br> fine-tuning** | **Comments** |
|
||||
|:---:|:---:|:---:|:---:|
|
||||
| [Instruct Pix2Pix](#instruct-pix2pix) | ✅ | ❌ | Can additionally be<br>fine-tuned for better <br>performance on specific <br>edit instructions. |
|
||||
| [Pix2Pix Zero](#pix2pixzero) | ✅ | ❌ | |
|
||||
| [Attend and Excite](#attend-and-excite) | ✅ | ❌ | |
|
||||
| [Semantic Guidance](#semantic-guidance) | ✅ | ❌ | |
|
||||
| [Self-attention Guidance](#self-attention-guidance) | ✅ | ❌ | |
|
||||
| [Depth2Image](#depth2image) | ✅ | ❌ | |
|
||||
| [MultiDiffusion Panorama](#multidiffusion-panorama) | ✅ | ❌ | |
|
||||
| [DreamBooth](#dreambooth) | ❌ | ✅ | |
|
||||
| [Textual Inversion](#textual-inversion) | ❌ | ✅ | |
|
||||
| [ControlNet](#controlnet) | ✅ | ❌ | A ControlNet can be <br>trained/fine-tuned on<br>a custom conditioning. |
|
||||
| [Prompt Weighting](#prompt-weighting) | ✅ | ❌ | |
|
||||
| [Custom Diffusion](#custom-diffusion) | ❌ | ✅ | |
|
||||
| [Model Editing](#model-editing) | ✅ | ❌ | |
|
||||
| [DiffEdit](#diffedit) | ✅ | ❌ | |
|
||||
|
||||
## Instruct Pix2Pix
|
||||
|
||||
@@ -137,13 +159,13 @@ See [here](../api/pipelines/stable_diffusion/panorama) for more information on h
|
||||
|
||||
In addition to pre-trained models, Diffusers has training scripts for fine-tuning models on user-provided data.
|
||||
|
||||
### DreamBooth
|
||||
## DreamBooth
|
||||
|
||||
[DreamBooth](../training/dreambooth) fine-tunes a model to teach it about a new subject. I.e. a few pictures of a person can be used to generate images of that person in different styles.
|
||||
|
||||
See [here](../training/dreambooth) for more information on how to use it.
|
||||
|
||||
### Textual Inversion
|
||||
## Textual Inversion
|
||||
|
||||
[Textual Inversion](../training/text_inversion) fine-tunes a model to teach it about a new concept. I.e. a few pictures of a style of artwork can be used to generate images in that style.
|
||||
|
||||
@@ -165,3 +187,32 @@ Prompt weighting is a simple technique that puts more attention weight on certai
|
||||
input.
|
||||
|
||||
For a more in-detail explanation and examples, see [here](../using-diffusers/weighted_prompts).
|
||||
|
||||
## Custom Diffusion
|
||||
|
||||
[Custom Diffusion](../training/custom_diffusion) only fine-tunes the cross-attention maps of a pre-trained
|
||||
text-to-image diffusion model. It also allows for additionally performing textual inversion. It supports
|
||||
multi-concept training by design. Like DreamBooth and Textual Inversion, Custom Diffusion is also used to
|
||||
teach a pre-trained text-to-image diffusion model about new concepts to generate outputs involving the
|
||||
concept(s) of interest.
|
||||
|
||||
For more details, check out our [official doc](../training/custom_diffusion).
|
||||
|
||||
## Model Editing
|
||||
|
||||
[Paper](https://arxiv.org/abs/2303.08084)
|
||||
|
||||
The [text-to-image model editing pipeline](../api/pipelines/stable_diffusion/model_editing) helps you mitigate some of the incorrect implicit assumptions a pre-trained text-to-image
|
||||
diffusion model might make about the subjects present in the input prompt. For example, if you prompt Stable Diffusion to generate images for "A pack of roses", the roses in the generated images
|
||||
are more likely to be red. This pipeline helps you change that assumption.
|
||||
|
||||
To know more details, check out the [official doc](../api/pipelines/stable_diffusion/model_editing).
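A hedged sketch of what using this pipeline can look like (the prompts and base model are illustrative; see the linked doc for the exact API):

```py
from diffusers import StableDiffusionModelEditingPipeline

pipe = StableDiffusionModelEditingPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")

# steer the implicit assumption "roses are red" toward "roses are blue"
pipe.edit_model("A pack of roses", "A pack of blue roses")

image = pipe("A pack of roses").images[0]
```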
|
||||
|
||||
## DiffEdit
|
||||
|
||||
[Paper](https://arxiv.org/abs/2210.11427)
|
||||
|
||||
[DiffEdit](../api/pipelines/stable_diffusion/diffedit) allows for semantic editing of input images along with
|
||||
input prompts while preserving the original input images as much as possible.
|
||||
|
||||
To know more details, check out the [official doc](../api/pipelines/stable_diffusion/diffedit).
|
||||
@@ -52,7 +52,7 @@ Now you can create a prompt to replace the mask with something else:
|
||||
|
||||
```python
|
||||
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
|
||||
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
|
||||
image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
|
||||
```
|
||||
|
||||
`image` | `mask_image` | `prompt` | output |
|
||||
|
||||
@@ -1,179 +0,0 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Using KerasCV Stable Diffusion Checkpoints in Diffusers
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
This is an experimental feature.
|
||||
|
||||
</Tip>
|
||||
|
||||
[KerasCV](https://github.com/keras-team/keras-cv/) provides APIs for implementing various computer vision workflows. It
|
||||
also provides the Stable Diffusion [v1 and v2](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion)
|
||||
models. Many practitioners find it easy to fine-tune the Stable Diffusion models shipped by KerasCV. However, as of this writing, KerasCV offers limited support to experiment with Stable Diffusion models for inference and deployment. On the other hand,
|
||||
Diffusers provides tooling dedicated to this purpose (and more), such as different [noise schedulers](https://huggingface.co/docs/diffusers/using-diffusers/schedulers), [flash attention](https://huggingface.co/docs/diffusers/optimization/xformers), and [other
|
||||
optimization techniques](https://huggingface.co/docs/diffusers/optimization/fp16).
|
||||
|
||||
How about fine-tuning Stable Diffusion models in KerasCV and exporting them such that they become compatible with Diffusers to combine the
|
||||
best of both worlds? We have created a [tool](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) that
|
||||
lets you do just that! It takes KerasCV Stable Diffusion checkpoints and exports them to Diffusers-compatible checkpoints.
|
||||
More specifically, it first converts the checkpoints to PyTorch and then wraps them into a
|
||||
[`StableDiffusionPipeline`](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview) which is ready
|
||||
for inference. Finally, it pushes the converted checkpoints to a repository on the Hugging Face Hub.
|
||||
|
||||
We welcome you to try out the tool [here](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers)
|
||||
and share feedback via [discussions](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers/discussions/new).
|
||||
|
||||
## Getting Started
|
||||
|
||||
First, you need to obtain the fine-tuned KerasCV Stable Diffusion checkpoints. We provide an
|
||||
overview of the different ways Stable Diffusion models can be fine-tuned [using `diffusers`](https://huggingface.co/docs/diffusers/training/overview). For the Keras implementation of some of these methods, you can check out these resources:
|
||||
|
||||
* [Teach StableDiffusion new concepts via Textual Inversion](https://keras.io/examples/generative/fine_tune_via_textual_inversion/)
|
||||
* [Fine-tuning Stable Diffusion](https://keras.io/examples/generative/finetune_stable_diffusion/)
|
||||
* [DreamBooth](https://keras.io/examples/generative/dreambooth/)
|
||||
* [Prompt-to-Prompt editing](https://github.com/miguelCalado/prompt-to-prompt-tensorflow)
|
||||
|
||||
Stable Diffusion is comprised of the following models:
|
||||
|
||||
* Text encoder
|
||||
* UNet
|
||||
* VAE
|
||||
|
||||
Depending on the fine-tuning task, we may fine-tune one or more of these components (the VAE is almost always left untouched). Here are some common combinations:
|
||||
|
||||
* DreamBooth: UNet and text encoder
|
||||
* Classical text to image fine-tuning: UNet
|
||||
* Textual Inversion: Just the newly initialized embeddings in the text encoder
|
||||
|
||||
### Performing the Conversion
|
||||
|
||||
Let's use [this checkpoint](https://huggingface.co/sayakpaul/textual-inversion-kerasio/resolve/main/textual_inversion_kerasio.h5) which was generated
|
||||
by conducting Textual Inversion with the following "placeholder token": `<my-funny-cat-token>`.
|
||||
|
||||
On the tool, we supply the following things:
|
||||
|
||||
* Path(s) to download the fine-tuned checkpoint(s) (KerasCV)
|
||||
* An HF token
|
||||
* Placeholder token (only applicable for Textual Inversion)
|
||||
|
||||
<div align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/space_snap.png"/>
|
||||
</div>
|
||||
|
||||
As soon as you hit "Submit", the conversion process will begin. Once it's complete, you should see the following:
|
||||
|
||||
<div align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/model_push_success.png"/>
|
||||
</div>
|
||||
|
||||
If you click the [link](https://huggingface.co/sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline/tree/main), you should see something like this:
|
||||
|
||||
<div align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/model_repo_contents.png"/>
|
||||
</div>
|
||||
|
||||
If you head over to the [model card of the repository](https://huggingface.co/sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline), the
|
||||
following should appear:
|
||||
|
||||
<div align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/model_card.png"/>
|
||||
</div>
|
||||
|
||||
<Tip>
|
||||
|
||||
Note that we're not specifying the UNet weights here since the UNet is not fine-tuned during Textual Inversion.
|
||||
|
||||
</Tip>
|
||||
|
||||
And that's it! You now have your fine-tuned KerasCV Stable Diffusion model in Diffusers 🧨.
|
||||
|
||||
## Using the Converted Model in Diffusers
|
||||
|
||||
Just beside the model card of the [repository](https://huggingface.co/sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline),
|
||||
you'd notice an inference widget to try out the model directly from the UI 🤗
|
||||
|
||||
<div align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inference_widget_output.png"/>
|
||||
</div>
|
||||
|
||||
On the top right hand side, we provide a "Use in Diffusers" button. If you click the button, you should see the following code-snippet:
|
||||
|
||||
```py
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
|
||||
```
|
||||
|
||||
The model is in standard `diffusers` format. Let's perform inference!
|
||||
|
||||
```py
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
|
||||
pipeline.to("cuda")
|
||||
|
||||
placeholder_token = "<my-funny-cat-token>"
|
||||
prompt = f"two {placeholder_token} getting married, photorealistic, high quality"
|
||||
image = pipeline(prompt, num_inference_steps=50).images[0]
|
||||
```
|
||||
|
||||
And we get:
|
||||
|
||||
<div align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusers_output_one.png"/>
|
||||
</div>
|
||||
|
||||
_**Note that if you specified a `placeholder_token` while performing the conversion, the tool will log it accordingly. Refer
|
||||
to the model card of [this repository](https://huggingface.co/sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline)
|
||||
as an example.**_
|
||||
|
||||
We welcome you to use the tool for various Stable Diffusion fine-tuning scenarios and let us know your feedback! Here are some examples
|
||||
of Diffusers checkpoints that were obtained using the tool:
|
||||
|
||||
* [sayakpaul/text-unet-dogs-kerascv_sd_diffusers_pipeline](https://huggingface.co/sayakpaul/text-unet-dogs-kerascv_sd_diffusers_pipeline) (DreamBooth with both the text encoder and UNet fine-tuned)
|
||||
* [sayakpaul/unet-dogs-kerascv_sd_diffusers_pipeline](https://huggingface.co/sayakpaul/unet-dogs-kerascv_sd_diffusers_pipeline) (DreamBooth with only the UNet fine-tuned)
|
||||
|
||||
## Incorporating Diffusers Goodies 🎁
|
||||
|
||||
Diffusers provides various options that one can leverage to experiment with different inference setups. One particularly
|
||||
useful option is the use of a different noise scheduler during inference other than what was used during fine-tuning.
|
||||
Let's try out the [`DPMSolverMultistepScheduler`](https://huggingface.co/docs/diffusers/main/en/api/schedulers/multistep_dpm_solver)
|
||||
which is different from the one ([`DDPMScheduler`](https://huggingface.co/docs/diffusers/main/en/api/schedulers/ddpm)) used during
|
||||
fine-tuning.
|
||||
|
||||
You can read more details about this process in [this section](https://huggingface.co/docs/diffusers/using-diffusers/schedulers).
|
||||
|
||||
```py
|
||||
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
|
||||
|
||||
pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
|
||||
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
|
||||
pipeline.to("cuda")
|
||||
|
||||
placeholder_token = "<my-funny-cat-token>"
|
||||
prompt = f"two {placeholder_token} getting married, photorealistic, high quality"
|
||||
image = pipeline(prompt, num_inference_steps=50).images[0]
|
||||
```
|
||||
|
||||
<div align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusers_output_two.png"/>
|
||||
</div>
|
||||
|
||||
One can also continue fine-tuning from these Diffusers checkpoints by leveraging some relevant tools from Diffusers. Refer [here](https://huggingface.co/docs/diffusers/training/overview) for
|
||||
more details. For inference-specific optimizations, refer [here](https://huggingface.co/docs/diffusers/main/en/optimization/fp16).
|
||||
|
||||
## Known Limitations
|
||||
|
||||
* Only Stable Diffusion v1 checkpoints are supported for conversion in this tool.
|
||||
docs/source/en/using-diffusers/other-formats.mdx (new file, 191 lines)
@@ -0,0 +1,191 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Load different Stable Diffusion formats
|
||||
|
||||
Stable Diffusion models are available in different formats depending on the framework they're trained and saved with, and where you download them from. Converting these formats for use in 🤗 Diffusers allows you to use all the features supported by the library, such as [using different schedulers](schedulers) for inference, [building your custom pipeline](write_own_pipeline), and a variety of techniques and methods for [optimizing inference speed](./optimization/opt_overview).
|
||||
|
||||
<Tip>
|
||||
|
||||
We highly recommend using the `.safetensors` format because it is more secure than traditional pickled files which are vulnerable and can be exploited to execute any code on your machine (learn more in the [Load safetensors](using_safetensors) guide).
|
||||
|
||||
</Tip>
|
||||
|
||||
This guide will show you how to convert other Stable Diffusion formats to be compatible with 🤗 Diffusers.
|
||||
|
||||
## PyTorch .ckpt
|
||||
|
||||
The checkpoint - or `.ckpt` - format is commonly used to store and save models. The `.ckpt` file contains the entire model and is typically several GBs in size. While you can load and use a `.ckpt` file directly with the [`~StableDiffusionPipeline.from_ckpt`] method, it is generally better to convert the `.ckpt` file to 🤗 Diffusers so both formats are available.
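For example, loading a single `.ckpt` file directly looks roughly like this (the path is a placeholder for your own checkpoint):

```py
from diffusers import StableDiffusionPipeline

# load a single-file checkpoint; converting to the Diffusers layout afterwards
# still makes the full feature set easier to use
pipe = StableDiffusionPipeline.from_ckpt("path/to/model.ckpt")
```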
|
||||
|
||||
There are two options for converting a `.ckpt` file: use a Space to convert the checkpoint, or convert the `.ckpt` file with a script.

### Convert with a Space

The easiest and most convenient way to convert a `.ckpt` file is to use the [SD to Diffusers](https://huggingface.co/spaces/diffusers/sd-to-diffusers) Space. You can follow the instructions on the Space to convert the `.ckpt` file.

This approach works well for basic models, but it may struggle with more customized models. You'll know the Space failed if it returns an empty pull request or error. In this case, you can try converting the `.ckpt` file with a script.

### Convert with a script

🤗 Diffusers provides a [conversion script](https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py) for converting `.ckpt` files. This approach is more reliable than the Space above.

Before you start, make sure you have a local clone of 🤗 Diffusers to run the script, and log in to your Hugging Face account so you can open pull requests and push your converted model to the Hub.

```bash
huggingface-cli login
```
To use the script:

1. Git clone the repository containing the `.ckpt` file you want to convert. For this example, let's convert this [TemporalNet](https://huggingface.co/CiaraRowles/TemporalNet) `.ckpt` file:

```bash
git lfs install
git clone https://huggingface.co/CiaraRowles/TemporalNet
```

2. Open a pull request on the repository where you're converting the checkpoint from:

```bash
cd TemporalNet && git fetch origin refs/pr/13:pr/13
git checkout pr/13
```

3. There are several input arguments to configure in the conversion script, but the most important ones are:

- `checkpoint_path`: the path to the `.ckpt` file to convert.
- `original_config_file`: a YAML file defining the configuration of the original architecture. If you can't find this file, try searching for the YAML file in the GitHub repository where you found the `.ckpt` file.
- `dump_path`: the path to the converted model.

For example, you can take the `cldm_v15.yaml` file from the [ControlNet](https://github.com/lllyasviel/ControlNet/tree/main/models) repository because the TemporalNet model is a Stable Diffusion v1.5 and ControlNet model.

4. Now you can run the script to convert the `.ckpt` file:

```bash
python ../diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path temporalnetv3.ckpt --original_config_file cldm_v15.yaml --dump_path ./ --controlnet
```

5. Once the conversion is done, upload your converted model and test out the resulting [pull request](https://huggingface.co/CiaraRowles/TemporalNet/discussions/13)!

```bash
git push origin pr/13:refs/pr/13
```
## Keras .pb or .h5

<Tip warning={true}>

🧪 This is an experimental feature. Only Stable Diffusion v1 checkpoints are supported by the Convert KerasCV Space at the moment.

</Tip>

[KerasCV](https://keras.io/keras_cv/) supports training for [Stable Diffusion](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion) v1 and v2. However, it offers limited support for experimenting with Stable Diffusion models for inference and deployment, whereas 🤗 Diffusers has a more complete set of features for this purpose, such as different [noise schedulers](https://huggingface.co/docs/diffusers/using-diffusers/schedulers), [flash attention](https://huggingface.co/docs/diffusers/optimization/xformers), and [other optimization techniques](https://huggingface.co/docs/diffusers/optimization/fp16).

The [Convert KerasCV](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) Space converts `.pb` or `.h5` files to PyTorch, and then wraps them in a [`StableDiffusionPipeline`] so it is ready for inference. The converted checkpoint is stored in a repository on the Hugging Face Hub.

For this example, let's convert the [`sayakpaul/textual-inversion-kerasio`](https://huggingface.co/sayakpaul/textual-inversion-kerasio/tree/main) checkpoint which was trained with Textual Inversion. It uses the special token `<my-funny-cat>` to personalize images with cats.

The Convert KerasCV Space allows you to input the following:

* Your Hugging Face token.
* Paths to download the UNet and text encoder weights from. Depending on how the model was trained, you don't necessarily need to provide the paths to both the UNet and text encoder. For example, Textual Inversion only requires the embeddings from the text encoder, and a text-to-image model only requires the UNet weights.
* The placeholder token, which is only applicable for textual inversion models.
* The `output_repo_prefix`, which is the name of the repository where the converted model is stored.

Click the **Submit** button to automatically convert the KerasCV checkpoint! Once the checkpoint is successfully converted, you'll see a link to the new repository containing the converted checkpoint. Follow the link to the new repository, and you'll see the Convert KerasCV Space generated a model card with an inference widget to try out the converted model.

If you prefer to run inference with code, click on the **Use in Diffusers** button in the upper right corner of the model card to copy and paste the code snippet:

```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
```

Then you can generate an image like:

```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
pipeline.to("cuda")

placeholder_token = "<my-funny-cat-token>"
prompt = f"two {placeholder_token} getting married, photorealistic, high quality"
image = pipeline(prompt, num_inference_steps=50).images[0]
```
## A1111 LoRA files

[Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (A1111) is a popular web UI for Stable Diffusion that supports model sharing platforms like [Civitai](https://civitai.com/). Models trained with the Low-Rank Adaptation (LoRA) technique are especially popular because they're fast to train and have a much smaller file size than a fully finetuned model. 🤗 Diffusers supports loading A1111 LoRA checkpoints with [`~loaders.LoraLoaderMixin.load_lora_weights`]:

```py
from diffusers import DiffusionPipeline, UniPCMultistepScheduler
import torch

pipeline = DiffusionPipeline.from_pretrained(
    "andite/anything-v4.0", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
```

Download a LoRA checkpoint from Civitai; this example uses the [Howls Moving Castle, Interior/Scenery LoRA (Ghibli Style)](https://civitai.com/models/14605?modelVersionId=19998) checkpoint, but feel free to try out any LoRA checkpoint!

```bash
!wget https://civitai.com/api/download/models/19998 -O howls_moving_castle.safetensors
```

Load the LoRA checkpoint into the pipeline with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method:

```py
pipeline.load_lora_weights(".", weight_name="howls_moving_castle.safetensors")
```

Now you can use the pipeline to generate images:

```py
prompt = "masterpiece, illustration, ultra-detailed, cityscape, san francisco, golden gate bridge, california, bay area, in the snow, beautiful detailed starry sky"
negative_prompt = "lowres, cropped, worst quality, low quality, normal quality, artifacts, signature, watermark, username, blurry, more than one bridge, bad architecture"

images = pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    width=512,
    height=512,
    num_inference_steps=25,
    num_images_per_prompt=4,
    generator=torch.manual_seed(0),
).images
```

Finally, create a helper function to display the images:

```py
from PIL import Image


def image_grid(imgs, rows=2, cols=2):
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


image_grid(images)
```

<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/a1111-lora-sf.png"/>
</div>
@@ -111,7 +111,7 @@ print(np.abs(image).sum())
|
||||
|
||||
The result is not the same even though you're using an identical seed because the GPU uses a different random number generator than the CPU.
|
||||
|
||||
To circumvent this problem, 🧨 Diffusers has a [`randn_tensor`](#diffusers.utils.randn_tensor) function for creating random noise on the CPU, and then moving the tensor to a GPU if necessary. The `randn_tensor` function is used everywhere inside the pipeline, allowing the user to **always** pass a CPU `Generator` even if the pipeline is run on a GPU.
|
||||
To circumvent this problem, 🧨 Diffusers has a [`~diffusers.utils.randn_tensor`] function for creating random noise on the CPU, and then moving the tensor to a GPU if necessary. The `randn_tensor` function is used everywhere inside the pipeline, allowing the user to **always** pass a CPU `Generator` even if the pipeline is run on a GPU.
|
||||
|
||||
You'll see the results are much closer now!
|
||||
|
||||
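For reference, a minimal sketch of how such CPU-generated noise can be created with `randn_tensor` (the latent shape below is just an example for a 512x512 Stable Diffusion latent):

```py
import torch
from diffusers.utils import randn_tensor

# Seed a CPU Generator; randn_tensor samples the noise on the CPU and then moves it to the requested device.
generator = torch.manual_seed(0)
latents = randn_tensor((1, 4, 64, 64), generator=generator, device=torch.device("cuda"), dtype=torch.float16)
```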
@@ -147,9 +147,6 @@ susceptible to precision error propagation. Don't expect similar results across
|
||||
different GPU hardware or PyTorch versions. In this case, you'll need to run
|
||||
exactly the same hardware and PyTorch version for full reproducibility.
|
||||
|
||||
### randn_tensor
|
||||
[[autodoc]] diffusers.utils.randn_tensor
|
||||
|
||||
## Deterministic algorithms
|
||||
|
||||
You can also configure PyTorch to use deterministic algorithms to create a reproducible pipeline. However, you should be aware that deterministic algorithms may be slower than nondeterministic ones and you may observe a decrease in performance. But if reproducibility is important to you, then this is the way to go!
|
||||
|
||||
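As a rough sketch, enabling deterministic algorithms in PyTorch typically looks like the following (the exact settings you need depend on your CUDA/cuBLAS setup):

```py
import os
import torch

# cuBLAS needs a fixed workspace configuration to be deterministic on CUDA >= 10.2
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"

# Disable benchmarking so cuDNN picks the same kernels every run
torch.backends.cudnn.benchmark = False
# Error out if an op has no deterministic implementation instead of silently being nondeterministic
torch.use_deterministic_algorithms(True)
```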
@@ -28,18 +28,15 @@ The following paragraphs show how to do so with the 🧨 Diffusers library.
|
||||
|
||||
## Load pipeline
|
||||
|
||||
Let's start by loading the stable diffusion pipeline.
|
||||
Remember that you have to be a registered user on the 🤗 Hugging Face Hub, and have "click-accepted" the [license](https://huggingface.co/runwayml/stable-diffusion-v1-5) in order to use stable diffusion.
|
||||
Let's start by loading the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model in the [`DiffusionPipeline`]:
|
||||
|
||||
```python
|
||||
from huggingface_hub import login
|
||||
from diffusers import DiffusionPipeline
|
||||
import torch
|
||||
|
||||
# first we need to login with our access token
|
||||
login()
|
||||
|
||||
# Now we can download the pipeline
|
||||
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
|
||||
```
|
||||
|
||||
|
||||
@@ -0,0 +1,80 @@
|
||||
# Textual inversion
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
The [`StableDiffusionPipeline`] supports textual inversion, a technique that enables a model like Stable Diffusion to learn a new concept from just a few sample images. This gives you more control over the generated images and allows you to tailor the model towards specific concepts. You can get started quickly with a collection of community created concepts in the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer).
|
||||
|
||||
This guide will show you how to run inference with textual inversion using a pre-learned concept from the Stable Diffusion Conceptualizer. If you're interested in teaching a model new concepts with textual inversion, take a look at the [Textual Inversion](./training/text_inversion) training guide.
|
||||
|
||||
Login to your Hugging Face account:
|
||||
|
||||
```py
|
||||
from huggingface_hub import notebook_login
|
||||
|
||||
notebook_login()
|
||||
```
|
||||
|
||||
Import the necessary libraries, and create a helper function to visualize the generated images:
|
||||
|
||||
```py
|
||||
import os
|
||||
import torch
|
||||
|
||||
import PIL
|
||||
from PIL import Image
|
||||
|
||||
from diffusers import StableDiffusionPipeline
|
||||
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
|
||||
|
||||
|
||||
def image_grid(imgs, rows, cols):
|
||||
assert len(imgs) == rows * cols
|
||||
|
||||
w, h = imgs[0].size
|
||||
grid = Image.new("RGB", size=(cols * w, rows * h))
|
||||
grid_w, grid_h = grid.size
|
||||
|
||||
for i, img in enumerate(imgs):
|
||||
grid.paste(img, box=(i % cols * w, i // cols * h))
|
||||
return grid
|
||||
```
|
||||
|
||||
Pick a Stable Diffusion checkpoint and a pre-learned concept from the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer):
|
||||
|
||||
```py
|
||||
pretrained_model_name_or_path = "runwayml/stable-diffusion-v1-5"
|
||||
repo_id_embeds = "sd-concepts-library/cat-toy"
|
||||
```
|
||||
|
||||
Now you can load a pipeline, and pass the pre-learned concept to it:
|
||||
|
||||
```py
|
||||
pipeline = StableDiffusionPipeline.from_pretrained(pretrained_model_name_or_path, torch_dtype=torch.float16).to("cuda")
|
||||
|
||||
pipeline.load_textual_inversion(repo_id_embeds)
|
||||
```
|
||||
|
||||
Create a prompt with the pre-learned concept by using the special placeholder token `<cat-toy>`, and choose the number of samples and rows of images you'd like to generate:
|
||||
|
||||
```py
|
||||
prompt = "a grafitti in a favela wall with a <cat-toy> on it"
|
||||
|
||||
num_samples = 2
|
||||
num_rows = 2
|
||||
```
|
||||
|
||||
Then run the pipeline (feel free to adjust the parameters like `num_inference_steps` and `guidance_scale` to see how they affect image quality), save the generated images and visualize them with the helper function you created at the beginning:
|
||||
|
||||
```py
|
||||
all_images = []
|
||||
for _ in range(num_rows):
|
||||
images = pipeline(prompt, num_images_per_prompt=num_samples, num_inference_steps=50, guidance_scale=7.5).images
|
||||
all_images.extend(images)
|
||||
|
||||
grid = image_grid(all_images, num_samples, num_rows)
|
||||
grid
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/textual_inversion_inference.png">
|
||||
</div>
|
||||
@@ -1,87 +1,67 @@
|
||||
# What is safetensors ?
|
||||
# Load safetensors
|
||||
|
||||
[safetensors](https://github.com/huggingface/safetensors) is a different format
|
||||
from the classic `.bin` which uses Pytorch which uses pickle. It contains the
|
||||
exact same data, which is just the model weights (or tensors).
|
||||
[safetensors](https://github.com/huggingface/safetensors) is a safe and fast file format for storing and loading tensors. Typically, PyTorch model weights are saved or *pickled* into a `.bin` file with Python's [`pickle`](https://docs.python.org/3/library/pickle.html) utility. However, `pickle` is not secure and pickled files may contain malicious code that can be executed. safetensors is a secure alternative to `pickle`, making it ideal for sharing model weights.
|
||||
|
||||
Pickle is notoriously unsafe which allow any malicious file to execute arbitrary code.
|
||||
The hub itself tries to prevent issues from it, but it's not a silver bullet.
|
||||
This guide will show you how you load `.safetensor` files, and how to convert Stable Diffusion model weights stored in other formats to `.safetensor`. Before you start, make sure you have safetensors installed:
|
||||
|
||||
`safetensors` first and foremost goal is to make loading machine learning models *safe*
|
||||
in the sense that no takeover of your computer can be done.
|
||||
|
||||
Hence the name.
|
||||
|
||||
# Why use safetensors ?
|
||||
|
||||
**Safety** can be one reason, if you're attempting to use a not well known model and
|
||||
you're not sure about the source of the file.
|
||||
|
||||
And a secondary reason, is **the speed of loading**. Safetensors can load models much faster
|
||||
than regular pickle files. If you spend a lot of times switching models, this can be
|
||||
a huge timesave.
|
||||
|
||||
Numbers taken AMD EPYC 7742 64-Core Processor
|
||||
```
|
||||
from diffusers import StableDiffusionPipeline
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")
|
||||
|
||||
# Loaded in safetensors 0:00:02.033658
|
||||
# Loaded in Pytorch 0:00:02.663379
|
||||
```bash
|
||||
!pip install safetensors
|
||||
```
|
||||
|
||||
This is for the entire loading time, the actual weights loading time to load 500MB:
|
||||
If you look at the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main) repository, you'll see weights inside the `text_encoder`, `unet` and `vae` subfolders are stored in the `.safetensors` format. By default, 🤗 Diffusers automatically loads these `.safetensors` files from their subfolders if they're available in the model repository.
|
||||
|
||||
```
|
||||
Safetensors: 3.4873ms
|
||||
PyTorch: 172.7537ms
|
||||
```
|
||||
For more explicit control, you can optionally set `use_safetensors=True` (if `safetensors` is not installed, you'll get an error message asking you to install it):
|
||||
|
||||
Performance in general is a tricky business, and there are a few things to understand:
|
||||
|
||||
- If you're using the model for the first time from the hub, you will have to download the weights.
|
||||
That's extremely likely to be much slower than any loading method, therefore you will not see any difference
|
||||
- If you're loading the model for the first time (let's say after a reboot) then your machine will have to
|
||||
actually read the disk. It's likely to be as slow in both cases. Again the speed difference may not be as visible (this depends on hardware and the actual model).
|
||||
- The best performance benefit is when the model was already loaded previously on your computer and you're switching from one model to another. Your OS, is trying really hard not to read from disk, since this is slow, so it will keep the files around in RAM, making it loading again much faster. Since safetensors is doing zero-copy of the tensors, reloading will be faster than pytorch since it has at least once extra copy to do.
|
||||
|
||||
# How to use safetensors ?
|
||||
|
||||
If you have `safetensors` installed, and all the weights are available in `safetensors` format, \
|
||||
then by default it will use that instead of the pytorch weights.
|
||||
|
||||
If you are really paranoid about this, the ultimate weapon would be disabling `torch.load`:
|
||||
```python
|
||||
import torch
|
||||
|
||||
|
||||
def _raise():
|
||||
raise RuntimeError("I don't want to use pickle")
|
||||
|
||||
|
||||
torch.load = lambda *args, **kwargs: _raise()
|
||||
```
|
||||
|
||||
# I want to use model X but it doesn't have safetensors weights.
|
||||
|
||||
Just go to this [space](https://huggingface.co/spaces/diffusers/convert).
|
||||
This will create a new PR with the weights, let's say `refs/pr/22`.
|
||||
|
||||
This space will download the pickled version, convert it, and upload it on the hub as a PR.
|
||||
If anything bad is contained in the file, it's Huggingface hub that will get issues, not your own computer.
|
||||
And we're equipped with dealing with it.
|
||||
|
||||
Then in order to use the model, even before the branch gets accepted by the original author you can do:
|
||||
|
||||
```python
|
||||
```py
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", revision="refs/pr/22")
|
||||
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True)
|
||||
```
|
||||
|
||||
or you can test it directly online with this [space](https://huggingface.co/spaces/diffusers/check_pr).
|
||||
However, model weights are not necessarily stored in separate subfolders like in the example above. Sometimes, all the weights are stored in a single `.safetensors` file. In this case, if the weights are Stable Diffusion weights, you can load the file directly with the [`~diffusers.loaders.FromCkptMixin.from_ckpt`] method:
|
||||
|
||||
And that's it !
|
||||
```py
|
||||
from diffusers import StableDiffusionPipeline
|
||||
|
||||
Anything unclear, concerns, or found a bugs ? [Open an issue](https://github.com/huggingface/diffusers/issues/new/choose)
|
||||
pipeline = StableDiffusionPipeline.from_ckpt(
|
||||
"https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors"
|
||||
)
|
||||
```
|
||||
|
||||
## Convert to safetensors
|
||||
|
||||
Not all weights on the Hub are available in the `.safetensors` format, and you may encounter weights stored as `.bin`. In this case, use the [Convert Space](https://huggingface.co/spaces/diffusers/convert) to convert the weights to `.safetensors`. The Convert Space downloads the pickled weights, converts them, and opens a Pull Request to upload the newly converted `.safetensors` file on the Hub. This way, if there is any malicious code contained in the pickled files, they're uploaded to the Hub - which has a [security scanner](https://huggingface.co/docs/hub/security-pickle#hubs-security-scanner) to detect unsafe files and suspicious pickle imports - instead of your computer.
|
||||
|
||||
You can use the model with the new `.safetensors` weights by specifying the reference to the Pull Request in the `revision` parameter (you can also test it in this [Check PR](https://huggingface.co/spaces/diffusers/check_pr) Space on the Hub), for example `refs/pr/22`:
|
||||
|
||||
```py
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", revision="refs/pr/22")
|
||||
```
|
||||
|
||||
## Why use safetensors?
|
||||
|
||||
There are several reasons for using safetensors:
|
||||
|
||||
- Safety is the number one reason for using safetensors. As open-source and model distribution grows, it is important to be able to trust the model weights you downloaded don't contain any malicious code. The current size of the header in safetensors prevents parsing extremely large JSON files.
|
||||
- Loading speed between switching models is another reason to use safetensors, which performs zero-copy of the tensors. It is especially fast compared to `pickle` if you're loading the weights to CPU (the default case), and just as fast if not faster when directly loading the weights to GPU. You'll only notice the performance difference if the model is already loaded, and not if you're downloading the weights or loading the model for the first time.
|
||||
|
||||
The time it takes to load the entire pipeline:
|
||||
|
||||
```py
|
||||
from diffusers import StableDiffusionPipeline
|
||||
|
||||
pipeline = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")
|
||||
"Loaded in safetensors 0:00:02.033658"
|
||||
"Loaded in PyTorch 0:00:02.663379"
|
||||
```
|
||||
|
||||
But the actual time it takes to load 500MB of the model weights is only:
|
||||
|
||||
```bash
|
||||
safetensors: 3.4873ms
|
||||
PyTorch: 172.7537ms
|
||||
```
|
||||
|
||||
- Lazy loading is also supported in safetensors, which is useful in distributed settings to only load some of the tensors. This format allowed the [BLOOM](https://huggingface.co/bigscience/bloom) model to be loaded in 45 seconds on 8 GPUs instead of 10 minutes with regular PyTorch weights.
|
||||
|
||||
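As an illustrative sketch of what lazy loading looks like with the `safetensors` library (the file and tensor names below are placeholders):

```py
from safetensors import safe_open

# Open the file without reading all the weights; only the requested tensor is loaded into memory.
with safe_open("model.safetensors", framework="pt", device="cpu") as f:
    tensor = f.get_tensor("model.embed_tokens.weight")  # placeholder tensor name
```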
@@ -94,5 +94,15 @@ a try!
|
||||
If your favorite pipeline does not have a `prompt_embeds` input, please make sure to open an issue; the Diffusers team tries to be as responsive as possible.
|
||||
|
||||
Compel 1.1.6 adds a utility class to simplify using textual inversions. Instantiate a `DiffusersTextualInversionManager` and pass it to Compel init:
|
||||
|
||||
```py
from compel import Compel, DiffusersTextualInversionManager

# `pipe` is assumed to be an already-loaded StableDiffusionPipeline with your textual inversions loaded
textual_inversion_manager = DiffusersTextualInversionManager(pipe)
compel = Compel(
    tokenizer=pipe.tokenizer,
    text_encoder=pipe.text_encoder,
    textual_inversion_manager=textual_inversion_manager,
)
```
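A sketch of how the resulting `compel` object is typically used: build the embeddings once and pass them to the pipeline via `prompt_embeds` (the prompt and token below are placeholders):

```py
# "<my-inversion-token>" is a placeholder for a textual inversion token you have loaded into `pipe`
prompt_embeds = compel("a photo of a <my-inversion-token> on a beach")
image = pipe(prompt_embeds=prompt_embeds, num_inference_steps=30).images[0]
```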
|
||||
|
||||
Also, please check out the documentation of the [compel](https://github.com/damian0815/compel) library for
|
||||
more information.
|
||||
|
||||
@@ -36,7 +36,7 @@ A pipeline is a quick and easy way to run a model for inference, requiring no mo
|
||||
|
||||
That was super easy, but how did the pipeline do that? Let's breakdown the pipeline and take a look at what's happening under the hood.
|
||||
|
||||
In the example above, the pipeline contains a UNet model and a DDPM scheduler. The pipeline denoises an image by taking random noise the size of the desired output and passing it through the model several times. At each timestep, the model predicts the *noise residual* and the scheduler uses it to predict a less noisy image. The pipeline repeats this process until it reaches the end of the specified number of inference steps.
|
||||
In the example above, the pipeline contains a [`UNet2DModel`] model and a [`DDPMScheduler`]. The pipeline denoises an image by taking random noise the size of the desired output and passing it through the model several times. At each timestep, the model predicts the *noise residual* and the scheduler uses it to predict a less noisy image. The pipeline repeats this process until it reaches the end of the specified number of inference steps.
|
||||
|
||||
To recreate the pipeline with the model and scheduler separately, let's write our own denoising process.
|
||||
|
||||
@@ -82,8 +82,8 @@ To recreate the pipeline with the model and scheduler separately, let's write ou
|
||||
>>> for t in scheduler.timesteps:
|
||||
... with torch.no_grad():
|
||||
... noisy_residual = model(input, t).sample
|
||||
>>> previous_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
|
||||
>>> input = previous_noisy_sample
|
||||
... previous_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
|
||||
... input = previous_noisy_sample
|
||||
```
|
||||
|
||||
This is the entire denoising process, and you can use this same pattern to write any diffusion system.
|
||||
@@ -96,7 +96,7 @@ To recreate the pipeline with the model and scheduler separately, let's write ou
|
||||
|
||||
>>> image = (input / 2 + 0.5).clamp(0, 1)
|
||||
>>> image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
|
||||
>>> image = Image.fromarray((image * 255)).round().astype("uint8")
|
||||
>>> image = Image.fromarray((image * 255).round().astype("uint8"))
|
||||
>>> image
|
||||
```
|
||||
|
||||
@@ -287,4 +287,4 @@ This is really what 🧨 Diffusers is designed for: to make it intuitive and eas
|
||||
For your next steps, feel free to:
|
||||
|
||||
* Learn how to [build and contribute a pipeline](using-diffusers/#contribute_pipeline) to 🧨 Diffusers. We can't wait and see what you'll come up with!
|
||||
* Explore [existing pipelines](./api/pipelines/overview) in the library, and see if you can deconstruct and build a pipeline from scratch using the models and schedulers separately.
|
||||
* Explore [existing pipelines](./api/pipelines/overview) in the library, and see if you can deconstruct and build a pipeline from scratch using the models and schedulers separately.
|
||||
|
||||
@@ -3,191 +3,46 @@
|
||||
title: "🧨 Diffusers"
|
||||
- local: quicktour
|
||||
title: "훑어보기"
|
||||
- local: in_translation
|
||||
title: Stable Diffusion
|
||||
- local: installation
|
||||
title: "설치"
|
||||
title: "시작하기"
|
||||
|
||||
- sections:
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: "Loading Pipelines, Models, and Schedulers"
|
||||
title: 개요
|
||||
- local: in_translation
|
||||
title: "Using different Schedulers"
|
||||
title: Unconditional 이미지 생성
|
||||
- local: in_translation
|
||||
title: "Configuring Pipelines, Models, and Schedulers"
|
||||
title: Textual Inversion
|
||||
- local: training/dreambooth
|
||||
title: DreamBooth
|
||||
- local: training/text2image
|
||||
title: Text-to-image
|
||||
- local: training/lora
|
||||
title: Low-Rank Adaptation of Large Language Models (LoRA)
|
||||
- local: in_translation
|
||||
title: "Loading and Adding Custom Pipelines"
|
||||
title: "불러오기 & 허브 (번역 예정)"
|
||||
- sections:
|
||||
title: ControlNet
|
||||
- local: in_translation
|
||||
title: "Unconditional Image Generation"
|
||||
- local: in_translation
|
||||
title: "Text-to-Image Generation"
|
||||
- local: in_translation
|
||||
title: "Text-Guided Image-to-Image"
|
||||
- local: in_translation
|
||||
title: "Text-Guided Image-Inpainting"
|
||||
- local: in_translation
|
||||
title: "Text-Guided Depth-to-Image"
|
||||
- local: in_translation
|
||||
title: "Reusing seeds for deterministic generation"
|
||||
- local: in_translation
|
||||
title: "Community Pipelines"
|
||||
- local: in_translation
|
||||
title: "How to contribute a Pipeline"
|
||||
title: "추론을 위한 파이프라인 (번역 예정)"
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: "Reinforcement Learning"
|
||||
- local: in_translation
|
||||
title: "Audio"
|
||||
- local: in_translation
|
||||
title: "Other Modalities"
|
||||
title: "Taking Diffusers Beyond Images"
|
||||
title: "Diffusers 사용법 (번역 예정)"
|
||||
title: InstructPix2Pix 학습
|
||||
title: 학습
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: "Memory and Speed"
|
||||
title: 개요
|
||||
- local: optimization/fp16
|
||||
title: 메모리와 속도
|
||||
- local: in_translation
|
||||
title: "xFormers"
|
||||
- local: in_translation
|
||||
title: "ONNX"
|
||||
- local: in_translation
|
||||
title: "OpenVINO"
|
||||
- local: in_translation
|
||||
title: "MPS"
|
||||
- local: in_translation
|
||||
title: "Habana Gaudi"
|
||||
title: "최적화/특수 하드웨어 (번역 예정)"
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: "Overview"
|
||||
- local: in_translation
|
||||
title: "Unconditional Image Generation"
|
||||
- local: in_translation
|
||||
title: "Textual Inversion"
|
||||
- local: in_translation
|
||||
title: "Dreambooth"
|
||||
- local: in_translation
|
||||
title: "Text-to-image fine-tuning"
|
||||
title: "학습 (번역 예정)"
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: "Stable Diffusion"
|
||||
- local: in_translation
|
||||
title: "Philosophy"
|
||||
- local: in_translation
|
||||
title: "How to contribute?"
|
||||
title: "개념 설명 (번역 예정)"
|
||||
- sections:
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: "Models"
|
||||
- local: in_translation
|
||||
title: "Diffusion Pipeline"
|
||||
- local: in_translation
|
||||
title: "Logging"
|
||||
- local: in_translation
|
||||
title: "Configuration"
|
||||
- local: in_translation
|
||||
title: "Outputs"
|
||||
title: "Main Classes"
|
||||
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: "Overview"
|
||||
- local: in_translation
|
||||
title: "AltDiffusion"
|
||||
- local: in_translation
|
||||
title: "Cycle Diffusion"
|
||||
- local: in_translation
|
||||
title: "DDIM"
|
||||
- local: in_translation
|
||||
title: "DDPM"
|
||||
- local: in_translation
|
||||
title: "Latent Diffusion"
|
||||
- local: in_translation
|
||||
title: "Unconditional Latent Diffusion"
|
||||
- local: in_translation
|
||||
title: "PaintByExample"
|
||||
- local: in_translation
|
||||
title: "PNDM"
|
||||
- local: in_translation
|
||||
title: "Score SDE VE"
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: "Overview"
|
||||
- local: in_translation
|
||||
title: "Text-to-Image"
|
||||
- local: in_translation
|
||||
title: "Image-to-Image"
|
||||
- local: in_translation
|
||||
title: "Inpaint"
|
||||
- local: in_translation
|
||||
title: "Depth-to-Image"
|
||||
- local: in_translation
|
||||
title: "Image-Variation"
|
||||
- local: in_translation
|
||||
title: "Super-Resolution"
|
||||
title: "Stable Diffusion"
|
||||
- local: in_translation
|
||||
title: "Stable Diffusion 2"
|
||||
- local: in_translation
|
||||
title: "Safe Stable Diffusion"
|
||||
- local: in_translation
|
||||
title: "Stochastic Karras VE"
|
||||
- local: in_translation
|
||||
title: "Dance Diffusion"
|
||||
- local: in_translation
|
||||
title: "UnCLIP"
|
||||
- local: in_translation
|
||||
title: "Versatile Diffusion"
|
||||
- local: in_translation
|
||||
title: "VQ Diffusion"
|
||||
- local: in_translation
|
||||
title: "RePaint"
|
||||
- local: in_translation
|
||||
title: "Audio Diffusion"
|
||||
title: "파이프라인 (번역 예정)"
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: "Overview"
|
||||
- local: in_translation
|
||||
title: "DDIM"
|
||||
- local: in_translation
|
||||
title: "DDPM"
|
||||
- local: in_translation
|
||||
title: "Singlestep DPM-Solver"
|
||||
- local: in_translation
|
||||
title: "Multistep DPM-Solver"
|
||||
- local: in_translation
|
||||
title: "Heun Scheduler"
|
||||
- local: in_translation
|
||||
title: "DPM Discrete Scheduler"
|
||||
- local: in_translation
|
||||
title: "DPM Discrete Scheduler with ancestral sampling"
|
||||
- local: in_translation
|
||||
title: "Stochastic Kerras VE"
|
||||
- local: in_translation
|
||||
title: "Linear Multistep"
|
||||
- local: in_translation
|
||||
title: "PNDM"
|
||||
- local: in_translation
|
||||
title: "VE-SDE"
|
||||
- local: in_translation
|
||||
title: "IPNDM"
|
||||
- local: in_translation
|
||||
title: "VP-SDE"
|
||||
- local: in_translation
|
||||
title: "Euler scheduler"
|
||||
- local: in_translation
|
||||
title: "Euler Ancestral Scheduler"
|
||||
- local: in_translation
|
||||
title: "VQDiffusionScheduler"
|
||||
- local: in_translation
|
||||
title: "RePaint Scheduler"
|
||||
title: "스케줄러 (번역 예정)"
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: "RL Planning"
|
||||
title: "Experimental Features"
|
||||
title: "API (번역 예정)"
|
||||
title: Torch2.0 지원
|
||||
- local: optimization/xformers
|
||||
title: xFormers
|
||||
- local: optimization/onnx
|
||||
title: ONNX
|
||||
- local: optimization/open_vino
|
||||
title: OpenVINO
|
||||
- local: optimization/mps
|
||||
title: MPS
|
||||
- local: optimization/habana
|
||||
title: Habana Gaudi
|
||||
title: 최적화/특수 하드웨어
|
||||
410
docs/source/ko/optimization/fp16.mdx
Normal file
@@ -0,0 +1,410 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# 메모리와 속도
|
||||
|
||||
메모리 또는 속도에 대해 🤗 Diffusers *추론*을 최적화하기 위한 몇 가지 기술과 아이디어를 제시합니다.
|
||||
일반적으로, memory-efficient attention을 위해 [xFormers](https://github.com/facebookresearch/xformers) 사용을 추천하기 때문에, 추천하는 [설치 방법](xformers)을 보고 설치해 보세요.
|
||||
|
||||
다음 설정이 성능과 메모리에 미치는 영향에 대해 설명합니다.
|
||||
|
||||
| | 지연시간 | 속도 향상 |
|
||||
| ---------------- | ------- | ------- |
|
||||
| 별도 설정 없음 | 9.50s | x1 |
|
||||
| cuDNN auto-tuner | 9.37s | x1.01 |
|
||||
| fp16 | 3.61s | x2.63 |
|
||||
| Channels Last 메모리 형식 | 3.30s | x2.88 |
|
||||
| traced UNet | 3.21s | x2.96 |
|
||||
| memory-efficient attention | 2.63s | x3.61 |
|
||||
|
||||
<em>
|
||||
NVIDIA TITAN RTX에서 50 DDIM 스텝의 "a photo of an astronaut riding a horse on mars" 프롬프트로 512x512 크기의 단일 이미지를 생성하였습니다.
|
||||
</em>
|
||||
|
||||
## cuDNN auto-tuner 활성화하기
|
||||
|
||||
[NVIDIA cuDNN](https://developer.nvidia.com/cudnn)은 컨볼루션을 계산하는 많은 알고리즘을 지원합니다. Autotuner는 짧은 벤치마크를 실행하고 주어진 입력 크기에 대해 주어진 하드웨어에서 최고의 성능을 가진 커널을 선택합니다.
|
||||
|
||||
**컨볼루션 네트워크**를 활용하고 있기 때문에 (다른 유형들은 현재 지원되지 않음), 다음 설정을 통해 추론 전에 cuDNN autotuner를 활성화할 수 있습니다:
|
||||
|
||||
```python
|
||||
import torch
|
||||
|
||||
torch.backends.cudnn.benchmark = True
|
||||
```
|
||||
|
||||
### fp32 대신 tf32 사용하기 (Ampere 및 이후 CUDA 장치들에서)
|
||||
|
||||
Ampere 및 이후 CUDA 장치에서 행렬곱 및 컨볼루션은 TensorFloat32(TF32) 모드를 사용하여 더 빠르지만 약간 덜 정확할 수 있습니다.
|
||||
기본적으로 PyTorch는 컨볼루션에 대해 TF32 모드를 활성화하지만 행렬 곱셈은 활성화하지 않습니다.
|
||||
네트워크에 완전한 float32 정밀도가 필요한 경우가 아니면 행렬 곱셈에 대해서도 이 설정을 활성화하는 것이 좋습니다.
|
||||
이는 일반적으로 무시할 수 있는 수치의 정확도 손실이 있지만, 계산 속도를 크게 높일 수 있습니다.
|
||||
그것에 대해 [여기](https://huggingface.co/docs/transformers/v4.18.0/en/performance#tf32)서 더 읽을 수 있습니다.
|
||||
추론하기 전에 다음을 추가하기만 하면 됩니다:
|
||||
|
||||
```python
|
||||
import torch
|
||||
|
||||
torch.backends.cuda.matmul.allow_tf32 = True
|
||||
```
|
||||
|
||||
## 반정밀도 가중치
|
||||
|
||||
더 많은 GPU 메모리를 절약하고 더 빠른 속도를 얻기 위해 모델 가중치를 반정밀도(half precision)로 직접 로드하고 실행할 수 있습니다.
|
||||
여기에는 `fp16`이라는 브랜치에 저장된 float16 버전의 가중치를 불러오고, 그 때 `float16` 유형을 사용하도록 PyTorch에 지시하는 작업이 포함됩니다.
|
||||
|
||||
```Python
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
pipe = pipe.to("cuda")
|
||||
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
image = pipe(prompt).images[0]
|
||||
```
|
||||
|
||||
<Tip warning={true}>
|
||||
어떤 파이프라인에서도 [`torch.autocast`](https://pytorch.org/docs/stable/amp.html#torch.autocast) 를 사용하는 것은 검은색 이미지를 생성할 수 있고, 순수한 float16 정밀도를 사용하는 것보다 항상 느리기 때문에 사용하지 않는 것이 좋습니다.
|
||||
</Tip>
|
||||
|
||||
## 추가 메모리 절약을 위한 슬라이스 어텐션
|
||||
|
||||
추가 메모리 절약을 위해, 한 번에 모두 계산하는 대신 단계적으로 계산을 수행하는 슬라이스 버전의 어텐션(attention)을 사용할 수 있습니다.
|
||||
|
||||
<Tip>
|
||||
Attention slicing은 모델이 하나 이상의 어텐션 헤드를 사용하는 한, 배치 크기가 1인 경우에도 유용합니다.
|
||||
하나 이상의 어텐션 헤드가 있는 경우 *QK^T* 어텐션 매트릭스는 상당한 양의 메모리를 절약할 수 있는 각 헤드에 대해 순차적으로 계산될 수 있습니다.
|
||||
</Tip>
|
||||
|
||||
각 헤드에 대해 순차적으로 어텐션 계산을 수행하려면, 다음과 같이 추론 전에 파이프라인에서 [`~StableDiffusionPipeline.enable_attention_slicing`]를 호출하면 됩니다:
|
||||
|
||||
```Python
|
||||
import torch
|
||||
from diffusers import StableDiffusionPipeline
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
pipe = pipe.to("cuda")
|
||||
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
pipe.enable_attention_slicing()
|
||||
image = pipe(prompt).images[0]
|
||||
```
|
||||
|
||||
추론 시간이 약 10% 느려지는 약간의 성능 저하가 있지만 이 방법을 사용하면 3.2GB 정도의 작은 VRAM으로도 Stable Diffusion을 사용할 수 있습니다!
|
||||
|
||||
|
||||
## 더 큰 배치를 위한 sliced VAE 디코드
|
||||
|
||||
제한된 VRAM에서 대규모 이미지 배치를 디코딩하거나 32개 이상의 이미지가 포함된 배치를 활성화하기 위해, 배치의 latent 이미지를 한 번에 하나씩 디코딩하는 슬라이스 VAE 디코드를 사용할 수 있습니다.
|
||||
|
||||
이를 [`~StableDiffusionPipeline.enable_attention_slicing`] 또는 [`~StableDiffusionPipeline.enable_xformers_memory_efficient_attention`]과 결합하여 메모리 사용을 추가로 최소화할 수 있습니다.
|
||||
|
||||
VAE 디코드를 한 번에 하나씩 수행하려면 추론 전에 파이프라인에서 [`~StableDiffusionPipeline.enable_vae_slicing`]을 호출합니다. 예를 들어:
|
||||
|
||||
```Python
|
||||
import torch
|
||||
from diffusers import StableDiffusionPipeline
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
pipe = pipe.to("cuda")
|
||||
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
pipe.enable_vae_slicing()
|
||||
images = pipe([prompt] * 32).images
|
||||
```
|
||||
|
||||
다중 이미지 배치에서는 VAE 디코드를 통해 약간의 성능 향상이 이루어집니다. 단일 이미지 배치에서는 성능에 영향이 없습니다.
|
||||
|
||||
|
||||
<a name="sequential_offloading"></a>
|
||||
## 메모리 절약을 위해 가속 기능을 사용하여 CPU로 오프로딩
|
||||
|
||||
추가 메모리 절약을 위해 가중치를 CPU로 오프로드하고 순방향 전달을 수행할 때만 GPU로 로드할 수 있습니다.
|
||||
|
||||
CPU 오프로딩을 수행하려면 [`~StableDiffusionPipeline.enable_sequential_cpu_offload`]를 호출하기만 하면 됩니다:
|
||||
|
||||
```Python
|
||||
import torch
|
||||
from diffusers import StableDiffusionPipeline
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
pipe.enable_sequential_cpu_offload()
|
||||
image = pipe(prompt).images[0]
|
||||
```
|
||||
|
||||
그러면 메모리 소비를 3GB 미만으로 줄일 수 있습니다.
|
||||
|
||||
참고로 이 방법은 전체 모델이 아닌 서브모듈 수준에서 작동합니다. 이는 메모리 소비를 최소화하는 가장 좋은 방법이지만 프로세스의 반복적 특성으로 인해 추론 속도가 훨씬 느립니다. 파이프라인의 UNet 구성 요소는 여러 번 실행됩니다('num_inference_steps' 만큼). 매번 UNet의 서로 다른 서브모듈이 순차적으로 온로드된 다음 필요에 따라 오프로드되므로 메모리 이동 횟수가 많습니다.
|
||||
|
||||
<Tip>
|
||||
또 다른 최적화 방법인 <a href="#model_offloading">모델 오프로딩</a>을 사용하는 것을 고려하십시오. 이는 훨씬 빠르지만 메모리 절약이 크지는 않습니다.
|
||||
</Tip>
|
||||
|
||||
또한 attention slicing과 결합해서 최소 메모리(< 2GB)로도 동작할 수 있습니다.
|
||||
|
||||
|
||||
```Python
|
||||
import torch
|
||||
from diffusers import StableDiffusionPipeline
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
pipe.enable_sequential_cpu_offload()
|
||||
pipe.enable_attention_slicing(1)
|
||||
|
||||
image = pipe(prompt).images[0]
|
||||
```
|
||||
|
||||
**참고**: 'enable_sequential_cpu_offload()'를 사용할 때, 미리 파이프라인을 CUDA로 이동하지 **않는** 것이 중요합니다.그렇지 않으면 메모리 소비의 이득이 최소화됩니다. 더 많은 정보를 위해 [이 이슈](https://github.com/huggingface/diffusers/issues/1934)를 보세요.
|
||||
|
||||
<a name="model_offloading"></a>
|
||||
## 빠른 추론과 메모리 절약을 위한 모델 오프로딩
|
||||
|
||||
[순차적 CPU 오프로딩](#sequential_offloading)은 이전 섹션에서 설명한 것처럼 많은 메모리를 보존하지만 필요에 따라 서브모듈을 GPU로 이동하고 새 모듈이 실행될 때 즉시 CPU로 반환되기 때문에 추론 속도가 느려집니다.
|
||||
|
||||
전체 모델 오프로딩은 각 모델의 구성 요소인 _modules_을 처리하는 대신, 전체 모델을 GPU로 이동하는 대안입니다. 이로 인해 추론 시간에 미치는 영향은 미미하지만(파이프라인을 'cuda'로 이동하는 것과 비교하여) 여전히 약간의 메모리를 절약할 수 있습니다.
|
||||
|
||||
이 시나리오에서는 파이프라인의 주요 구성 요소 중 하나만(일반적으로 텍스트 인코더, unet 및 vae) GPU에 있고, 나머지는 CPU에서 대기할 것입니다.
|
||||
여러 반복을 위해 실행되는 UNet과 같은 구성 요소는 더 이상 필요하지 않을 때까지 GPU에 남아 있습니다.
|
||||
|
||||
이 기능은 아래와 같이 파이프라인에서 `enable_model_cpu_offload()`를 호출하여 활성화할 수 있습니다.
|
||||
|
||||
```Python
|
||||
import torch
|
||||
from diffusers import StableDiffusionPipeline
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
pipe.enable_model_cpu_offload()
|
||||
image = pipe(prompt).images[0]
|
||||
```
|
||||
|
||||
이는 추가적인 메모리 절약을 위한 attention slicing과도 호환됩니다.
|
||||
|
||||
```Python
|
||||
import torch
|
||||
from diffusers import StableDiffusionPipeline
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
pipe.enable_model_cpu_offload()
|
||||
pipe.enable_attention_slicing(1)
|
||||
|
||||
image = pipe(prompt).images[0]
|
||||
```
|
||||
|
||||
<Tip>
|
||||
이 기능을 사용하려면 'accelerate' 버전 0.17.0 이상이 필요합니다.
|
||||
</Tip>
|
||||
|
||||
## Channels Last 메모리 형식 사용하기
|
||||
|
||||
Channels Last 메모리 형식은 차원 순서를 보존하는 메모리에서 NCHW 텐서 배열을 대체하는 방법입니다.
|
||||
Channels Last 텐서는 채널이 가장 조밀한 차원이 되는 방식으로 정렬됩니다(일명 픽셀당 이미지를 저장).
|
||||
현재 모든 연산자가 Channels Last 형식을 지원하는 것은 아니어서 성능이 저하될 수 있으므로, 사용해 보고 모델에서 잘 작동하는지 확인하는 것이 좋습니다.
|
||||
|
||||
|
||||
예를 들어 파이프라인의 UNet 모델이 channels Last 형식을 사용하도록 설정하려면 다음을 사용할 수 있습니다:
|
||||
|
||||
```python
|
||||
print(pipe.unet.conv_out.state_dict()["weight"].stride()) # (2880, 9, 3, 1)
|
||||
pipe.unet.to(memory_format=torch.channels_last) # in-place 연산
|
||||
# 2번째 차원에서 스트라이드 1을 가지는 (2880, 1, 960, 320)로, 연산이 작동함을 증명합니다.
|
||||
print(pipe.unet.conv_out.state_dict()["weight"].stride())
|
||||
```
|
||||
|
||||
## 추적(tracing)
|
||||
|
||||
추적(tracing)은 예제 입력 텐서를 모델에 통과시켜 실행하면서, 해당 입력이 모델의 레이어를 통과할 때 호출되는 연산을 캡처해 실행 파일 또는 'ScriptFunction'을 반환하며, 이는 just-in-time 컴파일로 최적화됩니다.
|
||||
|
||||
UNet 모델을 추적하기 위해 다음을 사용할 수 있습니다:
|
||||
|
||||
```python
|
||||
import time
|
||||
import torch
|
||||
from diffusers import StableDiffusionPipeline
|
||||
import functools
|
||||
|
||||
# torch 기울기 비활성화
|
||||
torch.set_grad_enabled(False)
|
||||
|
||||
# 변수 설정
|
||||
n_experiments = 2
|
||||
unet_runs_per_experiment = 50
|
||||
|
||||
|
||||
# 입력 불러오기
|
||||
def generate_inputs():
|
||||
sample = torch.randn(2, 4, 64, 64).half().cuda()
|
||||
timestep = torch.rand(1).half().cuda() * 999
|
||||
encoder_hidden_states = torch.randn(2, 77, 768).half().cuda()
|
||||
return sample, timestep, encoder_hidden_states
|
||||
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
torch_dtype=torch.float16,
|
||||
).to("cuda")
|
||||
unet = pipe.unet
|
||||
unet.eval()
|
||||
unet.to(memory_format=torch.channels_last) # Channels Last 메모리 형식 사용
|
||||
unet.forward = functools.partial(unet.forward, return_dict=False) # return_dict=False을 기본값으로 설정
|
||||
|
||||
# 워밍업
|
||||
for _ in range(3):
|
||||
with torch.inference_mode():
|
||||
inputs = generate_inputs()
|
||||
orig_output = unet(*inputs)
|
||||
|
||||
# 추적
|
||||
print("tracing..")
|
||||
unet_traced = torch.jit.trace(unet, inputs)
|
||||
unet_traced.eval()
|
||||
print("done tracing")
|
||||
|
||||
|
||||
# 워밍업 및 그래프 최적화
|
||||
for _ in range(5):
|
||||
with torch.inference_mode():
|
||||
inputs = generate_inputs()
|
||||
orig_output = unet_traced(*inputs)
|
||||
|
||||
|
||||
# 벤치마킹
|
||||
with torch.inference_mode():
|
||||
for _ in range(n_experiments):
|
||||
torch.cuda.synchronize()
|
||||
start_time = time.time()
|
||||
for _ in range(unet_runs_per_experiment):
|
||||
orig_output = unet_traced(*inputs)
|
||||
torch.cuda.synchronize()
|
||||
print(f"unet traced inference took {time.time() - start_time:.2f} seconds")
|
||||
for _ in range(n_experiments):
|
||||
torch.cuda.synchronize()
|
||||
start_time = time.time()
|
||||
for _ in range(unet_runs_per_experiment):
|
||||
orig_output = unet(*inputs)
|
||||
torch.cuda.synchronize()
|
||||
print(f"unet inference took {time.time() - start_time:.2f} seconds")
|
||||
|
||||
# 모델 저장
|
||||
unet_traced.save("unet_traced.pt")
|
||||
```
|
||||
|
||||
그 다음, 파이프라인의 `unet` 특성을 다음과 같이 추적된 모델로 바꿀 수 있습니다.
|
||||
|
||||
```python
|
||||
from diffusers import StableDiffusionPipeline
|
||||
import torch
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass
|
||||
class UNet2DConditionOutput:
|
||||
sample: torch.FloatTensor
|
||||
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
torch_dtype=torch.float16,
|
||||
).to("cuda")
|
||||
|
||||
# jitted unet 사용
|
||||
unet_traced = torch.jit.load("unet_traced.pt")
|
||||
|
||||
|
||||
# pipe.unet 삭제
|
||||
class TracedUNet(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.in_channels = pipe.unet.in_channels
|
||||
self.device = pipe.unet.device
|
||||
|
||||
def forward(self, latent_model_input, t, encoder_hidden_states):
|
||||
sample = unet_traced(latent_model_input, t, encoder_hidden_states)[0]
|
||||
return UNet2DConditionOutput(sample=sample)
|
||||
|
||||
|
||||
pipe.unet = TracedUNet()
|
||||
|
||||
with torch.inference_mode():
|
||||
image = pipe([prompt] * 1, num_inference_steps=50).images[0]
|
||||
```
|
||||
|
||||
|
||||
## Memory-efficient attention
|
||||
|
||||
어텐션 블록의 대역폭을 최적화하는 최근 작업으로 속도가 크게 향상되고 GPU 메모리 사용량이 개선되었습니다.
|
||||
@tridao의 가장 최근의 플래시 어텐션: [code](https://github.com/HazyResearch/flash-attention), [paper](https://arxiv.org/pdf/2205.14135.pdf).
|
||||
|
||||
배치 크기 1(프롬프트 1개)의 512x512 크기로 추론을 실행할 때 몇 가지 Nvidia GPU에서 얻은 속도 향상은 다음과 같습니다:
|
||||
|
||||
| GPU | 기준 어텐션 FP16 | 메모리 효율적인 어텐션 FP16 |
|
||||
|------------------ |--------------------- |--------------------------------- |
|
||||
| NVIDIA Tesla T4 | 3.5it/s | 5.5it/s |
|
||||
| NVIDIA 3060 RTX | 4.6it/s | 7.8it/s |
|
||||
| NVIDIA A10G | 8.88it/s | 15.6it/s |
|
||||
| NVIDIA RTX A6000 | 11.7it/s | 21.09it/s |
|
||||
| NVIDIA TITAN RTX | 12.51it/s | 18.22it/s |
|
||||
| A100-SXM4-40GB | 18.6it/s | 29.it/s |
|
||||
| A100-SXM-80GB | 18.7it/s | 29.5it/s |
|
||||
|
||||
이를 활용하려면 다음을 만족해야 합니다:
|
||||
- PyTorch > 1.12
|
||||
- Cuda 사용 가능
|
||||
- [xformers 라이브러리를 설치함](xformers)
|
||||
```python
|
||||
from diffusers import StableDiffusionPipeline
|
||||
import torch
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
torch_dtype=torch.float16,
|
||||
).to("cuda")
|
||||
|
||||
pipe.enable_xformers_memory_efficient_attention()
|
||||
|
||||
with torch.inference_mode():
|
||||
sample = pipe("a small cat")
|
||||
|
||||
# 선택: 이를 비활성화 하기 위해 다음을 사용할 수 있습니다.
|
||||
# pipe.disable_xformers_memory_efficient_attention()
|
||||
```
|
||||
71
docs/source/ko/optimization/habana.mdx
Normal file
@@ -0,0 +1,71 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Habana Gaudi에서 Stable Diffusion을 사용하는 방법
|
||||
|
||||
🤗 Diffusers는 🤗 [Optimum Habana](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion)를 통해서 Habana Gaudi와 호환됩니다.
|
||||
|
||||
## 요구 사항
|
||||
|
||||
- Optimum Habana 1.4 또는 이후, [여기](https://huggingface.co/docs/optimum/habana/installation)에 설치하는 방법이 있습니다.
|
||||
- SynapseAI 1.8.
|
||||
|
||||
|
||||
## 추론 파이프라인
|
||||
|
||||
Gaudi에서 Stable Diffusion 1 및 2로 이미지를 생성하려면 두 인스턴스를 인스턴스화해야 합니다:
|
||||
- [`GaudiStableDiffusionPipeline`](https://huggingface.co/docs/optimum/habana/package_reference/stable_diffusion_pipeline)이 포함된 파이프라인. 이 파이프라인은 *텍스트-이미지 생성*을 지원합니다.
|
||||
- [`GaudiDDIMScheduler`](https://huggingface.co/docs/optimum/habana/package_reference/stable_diffusion_pipeline#optimum.habana.diffusers.GaudiDDIMScheduler)이 포함된 스케줄러. 이 스케줄러는 Habana Gaudi에 최적화되어 있습니다.
|
||||
|
||||
파이프라인을 초기화할 때, HPU에 배포하기 위해 `use_habana=True`를 지정해야 합니다.
|
||||
또한 가능한 가장 빠른 생성을 위해 `use_hpu_graphs=True`로 **HPU 그래프**를 활성화해야 합니다.
|
||||
마지막으로, [Hugging Face Hub](https://huggingface.co/Habana)에서 다운로드할 수 있는 [Gaudi configuration](https://huggingface.co/docs/optimum/habana/package_reference/gaudi_config)을 지정해야 합니다.
|
||||
|
||||
```python
|
||||
from optimum.habana import GaudiConfig
|
||||
from optimum.habana.diffusers import GaudiDDIMScheduler, GaudiStableDiffusionPipeline
|
||||
|
||||
model_name = "stabilityai/stable-diffusion-2-base"
|
||||
scheduler = GaudiDDIMScheduler.from_pretrained(model_name, subfolder="scheduler")
|
||||
pipeline = GaudiStableDiffusionPipeline.from_pretrained(
|
||||
model_name,
|
||||
scheduler=scheduler,
|
||||
use_habana=True,
|
||||
use_hpu_graphs=True,
|
||||
gaudi_config="Habana/stable-diffusion",
|
||||
)
|
||||
```
|
||||
|
||||
파이프라인을 호출하여 하나 이상의 프롬프트에서 배치별로 이미지를 생성할 수 있습니다.
|
||||
|
||||
```python
|
||||
outputs = pipeline(
|
||||
prompt=[
|
||||
"High quality photo of an astronaut riding a horse in space",
|
||||
"Face of a yellow cat, high resolution, sitting on a park bench",
|
||||
],
|
||||
num_images_per_prompt=10,
|
||||
batch_size=4,
|
||||
)
|
||||
```
|
||||
|
||||
더 많은 정보를 얻기 위해, Optimum Habana의 [문서](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion)와 공식 Github 저장소에 제공된 [예시](https://github.com/huggingface/optimum-habana/tree/main/examples/stable-diffusion)를 확인하세요.
|
||||
|
||||
|
||||
## 벤치마크
|
||||
|
||||
다음은 [Habana/stable-diffusion](https://huggingface.co/Habana/stable-diffusion) Gaudi 구성(혼합 정밀도 bf16/fp32)을 사용하는 Habana first-generation Gaudi 및 Gaudi2의 지연 시간입니다:
|
||||
|
||||
| | Latency (배치 크기 = 1) | Throughput (배치 크기 = 8) |
|
||||
| ---------------------- |:------------------------:|:---------------------------:|
|
||||
| first-generation Gaudi | 4.29s | 0.283 images/s |
|
||||
| Gaudi2 | 1.54s | 0.904 images/s |
|
||||
71
docs/source/ko/optimization/mps.mdx
Normal file
@@ -0,0 +1,71 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Apple Silicon (M1/M2)에서 Stable Diffusion을 사용하는 방법
|
||||
|
||||
Diffusers는 Stable Diffusion 추론을 위해 PyTorch `mps`를 사용해 Apple 실리콘과 호환됩니다. 다음은 Stable Diffusion이 있는 M1 또는 M2 컴퓨터를 사용하기 위해 따라야 하는 단계입니다.
|
||||
|
||||
## 요구 사항
|
||||
|
||||
- Apple silicon (M1/M2) 하드웨어의 Mac 컴퓨터.
|
||||
- macOS 12.6 또는 이후 (13.0 또는 이후 추천).
|
||||
- Python arm64 버전
|
||||
- PyTorch 2.0(추천) 또는 1.13(`mps`를 지원하는 최소 버전). https://pytorch.org/get-started/locally/ 의 지침에 따라 `pip` 또는 `conda`로 설치할 수 있습니다.
|
||||
|
||||
|
||||
## 추론 파이프라인
|
||||
|
||||
아래 코드는 익숙한 `to()` 인터페이스를 사용하여 `mps` 백엔드로 Stable Diffusion 파이프라인을 M1 또는 M2 장치로 이동하는 방법을 보여줍니다.
|
||||
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
**PyTorch 1.13을 사용 중일 때** 추가 일회성 전달을 사용하여 파이프라인을 "프라이밍"하는 것을 추천합니다. 이것은 저희가 발견한 이상한 문제에 대한 임시 해결 방법입니다. 첫 번째 추론 전달은 후속 전달과 약간 다른 결과를 생성합니다. 이 전달은 한 번만 수행하면 되며, 추론 단계를 한 번만 사용하고 결과를 폐기해도 됩니다.
|
||||
|
||||
</Tip>
|
||||
|
||||
이전 팁에서 설명한 것들을 포함한 여러 문제를 해결하므로 PyTorch 2 이상을 사용하는 것이 좋습니다.
|
||||
|
||||
|
||||
```python
|
||||
# `huggingface-cli login`에 로그인되어 있음을 확인
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
|
||||
pipe = pipe.to("mps")
|
||||
|
||||
# 컴퓨터가 64GB 이하의 RAM 램일 때 추천
|
||||
pipe.enable_attention_slicing()
|
||||
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
|
||||
# 처음 "워밍업" 전달 (위 설명을 보세요)
|
||||
_ = pipe(prompt, num_inference_steps=1)
|
||||
|
||||
# 결과는 워밍업 전달 후의 CPU 장치의 결과와 일치합니다.
|
||||
image = pipe(prompt).images[0]
|
||||
```
|
||||
|
||||
## 성능 추천
|
||||
|
||||
M1/M2 성능은 메모리 압력에 매우 민감합니다. 시스템은 필요한 경우 자동으로 스왑되지만 스왑할 때 성능이 크게 저하됩니다.
|
||||
|
||||
|
||||
특히 컴퓨터의 시스템 RAM이 64GB 미만이거나 512 × 512픽셀보다 큰 비표준 해상도에서 이미지를 생성하는 경우, 추론 중에 메모리 압력을 줄이고 스와핑을 방지하기 위해 *어텐션 슬라이싱*을 사용하는 것이 좋습니다. 어텐션 슬라이싱은 비용이 많이 드는 어텐션 작업을 한 번에 모두 수행하는 대신 여러 단계로 수행합니다. 일반적으로 범용 메모리가 없는 컴퓨터에서 ~20%의 성능 영향을 미치지만 64GB 이상이 아닌 경우 대부분의 Apple Silicon 컴퓨터에서 *더 나은 성능*이 관찰되었습니다.
|
||||
|
||||
```python
|
||||
pipeline.enable_attention_slicing()
|
||||
```
|
||||
|
||||
## Known Issues
|
||||
|
||||
- 여러 프롬프트를 배치로 생성하는 것은 [충돌이 발생하거나 안정적으로 작동하지 않습니다](https://github.com/huggingface/diffusers/issues/363). 우리는 이것이 [PyTorch의 `mps` 백엔드](https://github.com/pytorch/pytorch/issues/84039)와 관련이 있다고 생각합니다. 이 문제는 해결되고 있지만 지금은 배치 대신 반복 방법을 사용하는 것이 좋습니다.
|
||||
65
docs/source/ko/optimization/onnx.mdx
Normal file
@@ -0,0 +1,65 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
|
||||
# 추론을 위해 ONNX 런타임을 사용하는 방법
|
||||
|
||||
🤗 Diffusers는 ONNX Runtime과 호환되는 Stable Diffusion 파이프라인을 제공합니다. 이를 통해 ONNX를 지원하는 모든 하드웨어(CPU 포함)에서, PyTorch의 가속 버전을 사용할 수 없는 경우에도 Stable Diffusion을 실행할 수 있습니다.
|
||||
|
||||
## 설치
|
||||
|
||||
다음 명령어로 ONNX Runtime을 지원하는 🤗 Optimum을 설치합니다:
|
||||
|
||||
```
|
||||
pip install optimum["onnxruntime"]
|
||||
```
|
||||
|
||||
## Stable Diffusion 추론
|
||||
|
||||
아래 코드는 ONNX 런타임을 사용하는 방법을 보여줍니다. `StableDiffusionPipeline` 대신 `ORTStableDiffusionPipeline`을 사용해야 합니다.
|
||||
PyTorch 모델을 불러오고 즉시 ONNX 형식으로 변환하려는 경우 `export=True`로 설정합니다.
|
||||
|
||||
```python
|
||||
from optimum.onnxruntime import ORTStableDiffusionPipeline
|
||||
|
||||
model_id = "runwayml/stable-diffusion-v1-5"
|
||||
pipe = ORTStableDiffusionPipeline.from_pretrained(model_id, export=True)
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
images = pipe(prompt).images[0]
|
||||
pipe.save_pretrained("./onnx-stable-diffusion-v1-5")
|
||||
```
|
||||
|
||||
파이프라인을 ONNX 형식으로 오프라인으로 내보내고 나중에 추론에 사용하려는 경우,
|
||||
[`optimum-cli export`](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) 명령어를 사용할 수 있습니다:
|
||||
|
||||
```bash
|
||||
optimum-cli export onnx --model runwayml/stable-diffusion-v1-5 sd_v15_onnx/
|
||||
```
|
||||
|
||||
그 다음 추론을 수행합니다:
|
||||
|
||||
```python
|
||||
from optimum.onnxruntime import ORTStableDiffusionPipeline
|
||||
|
||||
model_id = "sd_v15_onnx"
|
||||
pipe = ORTStableDiffusionPipeline.from_pretrained(model_id)
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
images = pipe(prompt).images[0]
|
||||
```
|
||||
|
||||
위에서는 `export=True`를 지정할 필요가 없었다는 점에 유의하세요.
|
||||
|
||||
[Optimum 문서](https://huggingface.co/docs/optimum/)에서 더 많은 예시를 찾을 수 있습니다.
|
||||
|
||||
## 알려진 이슈들
|
||||
|
||||
- 여러 프롬프트를 배치로 생성하면 메모리가 너무 많이 사용되는 것으로 보입니다. 이 문제를 조사하는 동안에는 배치 대신 프롬프트를 하나씩 반복 처리하는 방식이 필요할 수 있습니다.
|
||||
39
docs/source/ko/optimization/open_vino.mdx
Normal file
@@ -0,0 +1,39 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# 추론을 위한 OpenVINO 사용 방법
|
||||
|
||||
🤗 [Optimum](https://github.com/huggingface/optimum-intel)은 OpenVINO와 호환되는 Stable Diffusion 파이프라인을 제공합니다.
|
||||
이제 다양한 Intel 프로세서에서 OpenVINO Runtime으로 쉽게 추론을 수행할 수 있습니다([여기](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html)에서 지원되는 전체 기기 목록을 확인하세요).
|
||||
|
||||
## 설치
|
||||
|
||||
다음 명령어로 🤗 Optimum을 설치합니다:
|
||||
|
||||
```
|
||||
pip install optimum["openvino"]
|
||||
```
|
||||
|
||||
## Stable Diffusion 추론
|
||||
|
||||
OpenVINO 모델을 불러오고 OpenVINO 런타임으로 추론을 실행하려면 `StableDiffusionPipeline`을 `OVStableDiffusionPipeline`으로 교체해야 합니다. PyTorch 모델을 불러오고 즉시 OpenVINO 형식으로 변환하려는 경우 `export=True`로 설정합니다.
|
||||
|
||||
```python
|
||||
from optimum.intel.openvino import OVStableDiffusionPipeline
|
||||
|
||||
model_id = "runwayml/stable-diffusion-v1-5"
|
||||
pipe = OVStableDiffusionPipeline.from_pretrained(model_id, export=True)
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
images = pipe(prompt).images[0]
|
||||
```
|
||||
|
||||
[Optimum 문서](https://huggingface.co/docs/optimum/intel/inference#export-and-inference-of-stable-diffusion-models)에서 (정적 reshaping과 모델 컴파일 등의) 더 많은 예시들을 찾을 수 있습니다.
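예를 들어, 정적 reshaping은 대략 다음과 같은 형태로 사용할 수 있습니다. 아래 코드는 optimum-intel의 `reshape()`/`compile()` API를 바탕으로 한 간단한 스케치이며, 정확한 인자와 사용법은 위 Optimum 문서를 기준으로 확인하세요:

```python
from optimum.intel.openvino import OVStableDiffusionPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipe = OVStableDiffusionPipeline.from_pretrained(model_id, export=True)

# 입력 크기를 고정(정적 reshaping)한 뒤 미리 컴파일하면 추론 속도가 빨라질 수 있습니다.
batch_size, num_images, height, width = 1, 1, 512, 512
pipe.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
pipe.compile()

image = pipe("a photo of an astronaut riding a horse on mars", height=height, width=width).images[0]
```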
|
||||
36
docs/source/ko/optimization/xformers.mdx
Normal file
@@ -0,0 +1,36 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# xFormers 설치하기
|
||||
|
||||
추론과 학습 모두에 [xFormers](https://github.com/facebookresearch/xformers)를 사용하는 것이 좋습니다.
|
||||
자체 테스트에서 어텐션 블록에 적용된 최적화가 더 빠른 속도와 더 적은 메모리 사용량을 제공하는 것을 확인했습니다.
|
||||
|
||||
2023년 1월에 출시된 xFormers 버전 `0.0.16`부터 사전 빌드된 pip wheel을 사용하여 쉽게 설치할 수 있습니다:
|
||||
|
||||
```bash
|
||||
pip install xformers
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
xFormers pip 패키지에는 최신 버전의 PyTorch가 필요합니다(xFormers 0.0.16의 경우 PyTorch 1.13.1). 이전 버전의 PyTorch를 사용해야 하는 경우, [프로젝트 지침](https://github.com/facebookresearch/xformers#installing-xformers)에 따라 소스에서 xFormers를 빌드해 설치하는 것이 좋습니다.
|
||||
|
||||
</Tip>
|
||||
|
||||
xFormers를 설치하면, [여기](fp16#memory-efficient-attention)에서 설명한 것처럼 `enable_xformers_memory_efficient_attention()`을 사용하여 추론 속도를 높이고 메모리 소비를 줄일 수 있습니다.
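예를 들어, 다음은 Stable Diffusion 파이프라인에서 이를 활성화하는 간단한 스케치입니다(모델 ID와 프롬프트는 예시로 가정한 것입니다):

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# xFormers의 memory efficient attention을 사용하도록 설정합니다
pipe.enable_xformers_memory_efficient_attention()

image = pipe("a photo of an astronaut riding a horse on mars").images[0]
```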
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
[이 이슈](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212)에 따르면, xFormers `v0.0.16`에서는 일부 GPU에서 학습(파인 튜닝 또는 Dreambooth)을 할 수 없습니다. 해당 문제가 발생하면 해당 코멘트를 참고해 development 버전을 설치하세요.
|
||||
|
||||
</Tip>
|
||||
475
docs/source/ko/training/dreambooth.mdx
Normal file
@@ -0,0 +1,475 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# DreamBooth
|
||||
|
||||
[DreamBooth](https://arxiv.org/abs/2208.12242)는 한 주제에 대한 적은 수의 이미지(3~5장)만으로도 Stable Diffusion과 같은 text-to-image 모델을 개인화할 수 있는 방법입니다. 이를 통해 모델은 다양한 장면, 포즈 및 시점(뷰)에서 피사체에 대해 맥락화(contextualized)된 이미지를 생성할 수 있습니다.
|
||||
|
||||

|
||||
<small><a href="https://dreambooth.github.io">프로젝트 블로그</a>에서의 Dreambooth 예시</small>
|
||||
|
||||
|
||||
이 가이드는 다양한 GPU 크기와 Flax에서 [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) 모델로 DreamBooth를 파인튜닝하는 방법을 보여줍니다. 더 깊이 파고들어 작동 방식을 확인하는 데 관심이 있다면, 이 가이드에 사용된 DreamBooth의 모든 학습 스크립트를 [여기](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)에서 찾을 수 있습니다.
|
||||
|
||||
스크립트를 실행하기 전에 라이브러리의 학습에 필요한 dependencies를 설치해야 합니다. 또한 `main` GitHub 브랜치에서 🧨 Diffusers를 설치하는 것이 좋습니다.
|
||||
|
||||
```bash
|
||||
pip install git+https://github.com/huggingface/diffusers
|
||||
pip install -U -r diffusers/examples/dreambooth/requirements.txt
|
||||
```
|
||||
|
||||
xFormers는 학습에 필요한 요구 사항은 아니지만, 가능하면 [설치](../optimization/xformers)하는 것이 좋습니다. 학습 속도를 높이고 메모리 사용량을 줄일 수 있기 때문입니다.
|
||||
|
||||
모든 dependencies를 설정한 후, 다음과 같이 [🤗 Accelerate](https://github.com/huggingface/accelerate/) 환경을 초기화합니다:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
별도 설정 없이 기본 🤗 Accelerate 환경을 설치하려면 다음을 실행합니다:
|
||||
|
||||
```bash
|
||||
accelerate config default
|
||||
```
|
||||
|
||||
또는 현재 환경이 노트북과 같은 대화형 셸을 지원하지 않는 경우 다음을 사용할 수 있습니다:
|
||||
|
||||
```py
|
||||
from accelerate.utils import write_basic_config
|
||||
|
||||
write_basic_config()
|
||||
```
|
||||
|
||||
## 파인튜닝
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
DreamBooth 파인튜닝은 하이퍼파라미터에 매우 민감하고 과적합되기 쉽습니다. 적절한 하이퍼파라미터를 선택하는 데 도움이 되도록 다양한 권장 설정이 포함된 [심층 분석](https://huggingface.co/blog/dreambooth)을 살펴보는 것이 좋습니다.
|
||||
|
||||
</Tip>
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
[몇 장의 강아지 이미지들](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ)로 DreamBooth를 시도해봅시다.
|
||||
이를 다운로드해 디렉터리에 저장한 다음 `INSTANCE_DIR` 환경 변수를 해당 경로로 설정합니다:
|
||||
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
||||
export INSTANCE_DIR="path_to_training_images"
|
||||
export OUTPUT_DIR="path_to_saved_model"
|
||||
```
|
||||
|
||||
그런 다음, 다음 명령을 사용하여 학습 스크립트를 실행할 수 있습니다 (전체 학습 스크립트는 [여기](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py)에서 찾을 수 있습니다):
|
||||
|
||||
```bash
|
||||
accelerate launch train_dreambooth.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--instance_prompt="a photo of sks dog" \
|
||||
--resolution=512 \
|
||||
--train_batch_size=1 \
|
||||
--gradient_accumulation_steps=1 \
|
||||
--learning_rate=5e-6 \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--max_train_steps=400
|
||||
```
|
||||
</pt>
|
||||
<jax>
|
||||
|
||||
TPU에 액세스할 수 있거나 더 빠르게 훈련하고 싶다면 [Flax 학습 스크립트](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_flax.py)를 사용해 볼 수 있습니다. Flax 학습 스크립트는 gradient checkpointing 또는 gradient accumulation을 지원하지 않으므로, 메모리가 30GB 이상인 GPU가 필요합니다.
|
||||
|
||||
스크립트를 실행하기 전에 요구 사항이 설치되어 있는지 확인하십시오.
|
||||
|
||||
```bash
|
||||
pip install -U -r requirements.txt
|
||||
```
|
||||
|
||||
그러면 다음 명령어로 학습 스크립트를 실행시킬 수 있습니다:
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
|
||||
export INSTANCE_DIR="path-to-instance-images"
|
||||
export OUTPUT_DIR="path-to-save-model"
|
||||
|
||||
python train_dreambooth_flax.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--instance_prompt="a photo of sks dog" \
|
||||
--resolution=512 \
|
||||
--train_batch_size=1 \
|
||||
--learning_rate=5e-6 \
|
||||
--max_train_steps=400
|
||||
```
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
|
||||
### Prior-preserving(사전 보존) loss를 사용한 파인튜닝
|
||||
|
||||
과적합과 language drift를 방지하기 위해 사전 보존이 사용됩니다(관심이 있는 경우 [논문](https://arxiv.org/abs/2208.12242)을 참조하세요). 사전 보존을 위해 동일한 클래스의 다른 이미지를 학습 프로세스의 일부로 사용합니다. 좋은 점은 Stable Diffusion 모델 자체를 사용하여 이러한 이미지를 생성할 수 있다는 것입니다! 학습 스크립트는 생성된 이미지를 우리가 지정한 로컬 경로에 저장합니다.
|
||||
|
||||
저자들에 따르면 사전 보존을 위해 `num_epochs * num_samples`개의 이미지를 생성하는 것이 좋습니다. 대부분의 경우 200~300개면 잘 작동합니다.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```bash
|
||||
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
||||
export INSTANCE_DIR="path_to_training_images"
|
||||
export CLASS_DIR="path_to_class_images"
|
||||
export OUTPUT_DIR="path_to_saved_model"
|
||||
|
||||
accelerate launch train_dreambooth.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--class_data_dir=$CLASS_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--with_prior_preservation --prior_loss_weight=1.0 \
|
||||
--instance_prompt="a photo of sks dog" \
|
||||
--class_prompt="a photo of dog" \
|
||||
--resolution=512 \
|
||||
--train_batch_size=1 \
|
||||
--gradient_accumulation_steps=1 \
|
||||
--learning_rate=5e-6 \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
```
|
||||
</pt>
|
||||
<jax>
|
||||
```bash
|
||||
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
|
||||
export INSTANCE_DIR="path-to-instance-images"
|
||||
export CLASS_DIR="path-to-class-images"
|
||||
export OUTPUT_DIR="path-to-save-model"
|
||||
|
||||
python train_dreambooth_flax.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--class_data_dir=$CLASS_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--with_prior_preservation --prior_loss_weight=1.0 \
|
||||
--instance_prompt="a photo of sks dog" \
|
||||
--class_prompt="a photo of dog" \
|
||||
--resolution=512 \
|
||||
--train_batch_size=1 \
|
||||
--learning_rate=5e-6 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
```
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
|
||||
## 텍스트 인코더와 UNet을 함께 파인튜닝하기
|
||||
|
||||
해당 스크립트를 사용하면 `unet`과 함께 `text_encoder`를 파인튜닝할 수 있습니다. 실험에서(자세한 내용은 [🧨 Diffusers를 사용해 DreamBooth로 Stable Diffusion 학습하기](https://huggingface.co/blog/dreambooth) 게시물을 확인하세요), 특히 얼굴 이미지를 생성할 때 훨씬 더 나은 결과를 얻을 수 있습니다.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
텍스트 인코더를 학습시키려면 추가 메모리가 필요해 16GB GPU로는 동작하지 않습니다. 이 옵션을 사용하려면 최소 24GB VRAM이 필요합니다.
|
||||
|
||||
</Tip>
|
||||
|
||||
`--train_text_encoder` 인수를 학습 스크립트에 전달하여 `text_encoder` 및 `unet`을 파인튜닝할 수 있습니다:
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```bash
|
||||
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
||||
export INSTANCE_DIR="path_to_training_images"
|
||||
export CLASS_DIR="path_to_class_images"
|
||||
export OUTPUT_DIR="path_to_saved_model"
|
||||
|
||||
accelerate launch train_dreambooth.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--train_text_encoder \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--class_data_dir=$CLASS_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--with_prior_preservation --prior_loss_weight=1.0 \
|
||||
--instance_prompt="a photo of sks dog" \
|
||||
--class_prompt="a photo of dog" \
|
||||
--resolution=512 \
|
||||
--train_batch_size=1 \
|
||||
--use_8bit_adam \
|
||||
--gradient_checkpointing \
|
||||
--learning_rate=2e-6 \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
```
|
||||
</pt>
|
||||
<jax>
|
||||
```bash
|
||||
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
|
||||
export INSTANCE_DIR="path-to-instance-images"
|
||||
export CLASS_DIR="path-to-class-images"
|
||||
export OUTPUT_DIR="path-to-save-model"
|
||||
|
||||
python train_dreambooth_flax.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--train_text_encoder \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--class_data_dir=$CLASS_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--with_prior_preservation --prior_loss_weight=1.0 \
|
||||
--instance_prompt="a photo of sks dog" \
|
||||
--class_prompt="a photo of dog" \
|
||||
--resolution=512 \
|
||||
--train_batch_size=1 \
|
||||
--learning_rate=2e-6 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
```
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
|
||||
## LoRA로 파인튜닝하기
|
||||
|
||||
DreamBooth에서 대규모 모델의 학습을 가속화하기 위한 파인튜닝 기술인 LoRA(Low-Rank Adaptation of Large Language Models)를 사용할 수 있습니다. 자세한 내용은 [LoRA 학습](lora#dreambooth) 가이드를 참조하세요.
|
||||
|
||||
### 학습 중 체크포인트 저장하기
|
||||
|
||||
Dreambooth로 훈련하는 동안 과적합하기 쉬우므로, 때때로 학습 중에 정기적인 체크포인트를 저장하는 것이 유용합니다. 중간 체크포인트 중 하나가 최종 모델보다 더 잘 작동할 수 있습니다! 체크포인트 저장 기능을 활성화하려면 학습 스크립트에 다음 인수를 전달해야 합니다:
|
||||
|
||||
```bash
|
||||
--checkpointing_steps=500
|
||||
```
|
||||
|
||||
이렇게 하면 `output_dir`의 하위 폴더에 전체 학습 상태가 저장됩니다. 하위 폴더 이름은 접두사 `checkpoint-` 뒤에 지금까지 수행된 step 수가 붙는 형식입니다. 예를 들어 `checkpoint-1500`은 1500 학습 step 후에 저장된 체크포인트입니다.
|
||||
|
||||
#### 저장된 체크포인트에서 훈련 재개하기
|
||||
|
||||
저장된 체크포인트에서 훈련을 재개하려면, `--resume_from_checkpoint` 인수를 전달한 다음 사용할 체크포인트의 이름을 지정하면 됩니다. 특수 문자열 `"latest"`를 사용하여 저장된 마지막 체크포인트(즉, step 수가 가장 많은 체크포인트)에서 재개할 수도 있습니다. 예를 들어 다음은 1500 step 후에 저장된 체크포인트에서부터 학습을 재개합니다:
|
||||
|
||||
```bash
|
||||
--resume_from_checkpoint="checkpoint-1500"
|
||||
```
|
||||
|
||||
원하는 경우 일부 하이퍼파라미터를 조정할 수 있습니다.
|
||||
|
||||
#### 저장된 체크포인트를 사용하여 추론 수행하기
|
||||
|
||||
저장된 체크포인트는 훈련 재개에 적합한 형식으로 저장됩니다. 여기에는 모델 가중치뿐만 아니라 옵티마이저, 데이터 로더 및 학습률의 상태도 포함됩니다.
|
||||
|
||||
**`"accelerate>=0.16.0"`**이 설치된 경우 다음 코드를 사용하여 중간 체크포인트에서 추론을 실행합니다.
|
||||
|
||||
```python
|
||||
from diffusers import DiffusionPipeline, UNet2DConditionModel
|
||||
from transformers import CLIPTextModel
|
||||
import torch
|
||||
|
||||
# 학습에 사용된 것과 동일한 인수(model, revision)로 파이프라인을 로드합니다.
|
||||
model_id = "CompVis/stable-diffusion-v1-4"
|
||||
|
||||
unet = UNet2DConditionModel.from_pretrained("/sddata/dreambooth/daruma-v2-1/checkpoint-100/unet")
|
||||
|
||||
# `args.train_text_encoder`로 학습한 경우에는 텍스트 인코더도 꼭 불러오세요
|
||||
text_encoder = CLIPTextModel.from_pretrained("/sddata/dreambooth/daruma-v2-1/checkpoint-100/text_encoder")
|
||||
|
||||
pipeline = DiffusionPipeline.from_pretrained(model_id, unet=unet, text_encoder=text_encoder, torch_dtype=torch.float16)
|
||||
pipeline.to("cuda")
|
||||
|
||||
# 추론을 수행하거나 저장하거나, 허브에 푸시합니다.
|
||||
pipeline.save_pretrained("dreambooth-pipeline")
|
||||
```
|
||||
|
||||
**`"accelerate<0.16.0"`**이 설치되어 있는 경우, 먼저 추론 파이프라인으로 변환해야 합니다:
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
# 학습에 사용된 것과 동일한 인수(model, revision)로 파이프라인을 로드합니다.
|
||||
model_id = "CompVis/stable-diffusion-v1-4"
|
||||
pipeline = DiffusionPipeline.from_pretrained(model_id)
|
||||
|
||||
accelerator = Accelerator()
|
||||
|
||||
# 초기 학습에 `--train_text_encoder`가 사용된 경우 text_encoder를 사용합니다.
|
||||
unet, text_encoder = accelerator.prepare(pipeline.unet, pipeline.text_encoder)
|
||||
|
||||
# 체크포인트 경로로부터 상태를 복원합니다. 여기서는 절대 경로를 사용해야 합니다.
|
||||
accelerator.load_state("/sddata/dreambooth/daruma-v2-1/checkpoint-100")
|
||||
|
||||
# unwrapped 모델로 파이프라인을 다시 빌드합니다 (.unet 및 .text_encoder에 직접 할당하는 방식도 작동해야 합니다)
|
||||
pipeline = DiffusionPipeline.from_pretrained(
|
||||
model_id,
|
||||
unet=accelerator.unwrap_model(unet),
|
||||
text_encoder=accelerator.unwrap_model(text_encoder),
|
||||
)
|
||||
|
||||
# 추론을 수행하거나 저장하거나, 허브에 푸시합니다.
|
||||
pipeline.save_pretrained("dreambooth-pipeline")
|
||||
```
|
||||
|
||||
## 각 GPU 용량에서의 최적화
|
||||
|
||||
하드웨어에 따라 16GB부터 8GB까지의 GPU에서 DreamBooth를 최적화할 수 있는 몇 가지 방법이 있습니다!
|
||||
|
||||
### xFormers
|
||||
|
||||
[xFormers](https://github.com/facebookresearch/xformers)는 Transformers를 최적화하기 위한 toolbox이며, 🧨 Diffusers에서 사용되는 [memory-efficient attention](https://facebookresearch.github.io/xformers/components/ops.html#module-xformers.ops) 메커니즘을 포함하고 있습니다. [xFormers를 설치](../optimization/xformers)한 다음 학습 스크립트에 다음 인수를 추가합니다:
|
||||
|
||||
```bash
|
||||
--enable_xformers_memory_efficient_attention
|
||||
```
|
||||
|
||||
xFormers는 Flax에서 사용할 수 없습니다.
|
||||
|
||||
### 그래디언트를 None으로 설정하기
|
||||
|
||||
메모리 사용량을 줄일 수 있는 또 다른 방법은 [그래디언트를 0 대신 `None`으로 설정](https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html)하는 것입니다. 그러나 이로 인해 특정 동작이 변경될 수 있으므로, 문제가 발생하면 이 인수를 제거해 보세요. 학습 스크립트에 다음 인수를 추가하여 그래디언트를 `None`으로 설정합니다:
|
||||
|
||||
```bash
|
||||
--set_grads_to_none
|
||||
```
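참고로, 이 옵션은 PyTorch의 `optimizer.zero_grad(set_to_none=True)`에 해당하는 동작을 켜는 것으로 보입니다. 학습 스크립트와는 무관한, 순수 PyTorch에서의 최소 예시는 다음과 같습니다:

```python
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

loss = model(torch.randn(8, 4)).sum()
loss.backward()
optimizer.step()

# 그래디언트를 0 텐서 대신 None으로 설정해 메모리 사용량을 줄입니다
optimizer.zero_grad(set_to_none=True)
```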
|
||||
|
||||
### 16GB GPU
|
||||
|
||||
Gradient checkpointing과 [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)의 8비트 옵티마이저의 도움으로, 16GB GPU에서 dreambooth를 훈련할 수 있습니다. bitsandbytes가 설치되어 있는지 확인하세요:
|
||||
|
||||
```bash
|
||||
pip install bitsandbytes
|
||||
```
|
||||
|
||||
그 다음, 학습 스크립트에 `--use_8bit_adam` 옵션을 명시합니다:
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
||||
export INSTANCE_DIR="path_to_training_images"
|
||||
export CLASS_DIR="path_to_class_images"
|
||||
export OUTPUT_DIR="path_to_saved_model"
|
||||
|
||||
accelerate launch train_dreambooth.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--class_data_dir=$CLASS_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--with_prior_preservation --prior_loss_weight=1.0 \
|
||||
--instance_prompt="a photo of sks dog" \
|
||||
--class_prompt="a photo of dog" \
|
||||
--resolution=512 \
|
||||
--train_batch_size=1 \
|
||||
--gradient_accumulation_steps=2 --gradient_checkpointing \
|
||||
--use_8bit_adam \
|
||||
--learning_rate=5e-6 \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
```
|
||||
|
||||
### 12GB GPU
|
||||
|
||||
12GB GPU에서 DreamBooth를 실행하려면 gradient checkpointing, 8비트 옵티마이저, xFormers를 활성화하고 그래디언트를 `None`으로 설정해야 합니다.
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
||||
export INSTANCE_DIR="path-to-instance-images"
|
||||
export CLASS_DIR="path-to-class-images"
|
||||
export OUTPUT_DIR="path-to-save-model"
|
||||
|
||||
accelerate launch train_dreambooth.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--class_data_dir=$CLASS_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--with_prior_preservation --prior_loss_weight=1.0 \
|
||||
--instance_prompt="a photo of sks dog" \
|
||||
--class_prompt="a photo of dog" \
|
||||
--resolution=512 \
|
||||
--train_batch_size=1 \
|
||||
--gradient_accumulation_steps=1 --gradient_checkpointing \
|
||||
--use_8bit_adam \
|
||||
--enable_xformers_memory_efficient_attention \
|
||||
--set_grads_to_none \
|
||||
--learning_rate=2e-6 \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
```
|
||||
|
||||
### 8GB GPU에서 학습하기
|
||||
|
||||
8GB GPU에 대해서는 [DeepSpeed](https://www.deepspeed.ai/)를 사용해 일부 텐서를 VRAM에서 CPU 또는 NVME로 오프로드하여 더 적은 GPU 메모리로 학습할 수도 있습니다.
|
||||
|
||||
🤗 Accelerate 환경을 구성하려면 다음 명령을 실행하세요:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
환경 구성 중에 DeepSpeed 사용 여부를 묻는 항목에서 DeepSpeed를 사용하도록 선택하세요.
|
||||
DeepSpeed stage 2와 fp16 혼합 정밀도를 결합하고 모델 매개변수와 옵티마이저 상태를 모두 CPU로 오프로드하면, 8GB 미만의 VRAM으로도 학습할 수 있습니다.
|
||||
단점은 더 많은 시스템 RAM(약 25GB)이 필요하다는 것입니다. 추가 구성 옵션은 [DeepSpeed 문서](https://huggingface.co/docs/accelerate/usage_guides/deepspeed)를 참조하세요.
|
||||
|
||||
또한 기본 Adam 옵티마이저를 DeepSpeed의 최적화된 Adam 버전으로 변경해야 합니다.
|
||||
이는 상당한 속도 향상을 제공하는 최적화된 Adam 구현인 [`deepspeed.ops.adam.DeepSpeedCPUAdam`](https://deepspeed.readthedocs.io/en/latest/optimizers.html#adam-cpu)입니다.
|
||||
`DeepSpeedCPUAdam`을 활성화하려면 시스템의 CUDA toolchain 버전이 PyTorch와 함께 설치된 것과 동일해야 합니다.
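버전이 일치하는지 간단히 확인하려면, 예를 들어 PyTorch가 빌드된 CUDA 버전을 출력해 시스템의 `nvcc --version` 출력과 비교해 볼 수 있습니다:

```python
import torch

# PyTorch와 함께 설치된 CUDA 런타임 버전 (CPU 빌드에서는 None)
print(torch.version.cuda)
# 이 값이 시스템의 `nvcc --version`이 보고하는 CUDA toolkit 버전과 일치해야 합니다.
```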
|
||||
|
||||
8비트 옵티마이저는 현재 DeepSpeed와 호환되지 않는 것 같습니다.
|
||||
|
||||
다음 명령으로 학습을 시작합니다:
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
||||
export INSTANCE_DIR="path_to_training_images"
|
||||
export CLASS_DIR="path_to_class_images"
|
||||
export OUTPUT_DIR="path_to_saved_model"
|
||||
|
||||
accelerate launch train_dreambooth.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--class_data_dir=$CLASS_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--with_prior_preservation --prior_loss_weight=1.0 \
|
||||
--instance_prompt="a photo of sks dog" \
|
||||
--class_prompt="a photo of dog" \
|
||||
--resolution=512 \
|
||||
--train_batch_size=1 \
|
||||
--sample_batch_size=1 \
|
||||
--gradient_accumulation_steps=1 --gradient_checkpointing \
|
||||
--learning_rate=5e-6 \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800 \
|
||||
--mixed_precision=fp16
|
||||
```
|
||||
|
||||
## 추론
|
||||
|
||||
모델을 학습한 후에는, 모델이 저장된 경로를 지정해 [`StableDiffusionPipeline`]로 추론을 수행할 수 있습니다. 프롬프트에 학습에 사용된 특수 `식별자`(이전 예시의 `sks`)가 포함되어 있는지 확인하세요.
|
||||
|
||||
**`"accelerate>=0.16.0"`**이 설치되어 있는 경우 다음 코드를 사용하여 추론을 실행할 수 있습니다:
|
||||
|
||||
```python
|
||||
from diffusers import StableDiffusionPipeline
|
||||
import torch
|
||||
|
||||
model_id = "path_to_saved_model"
|
||||
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
|
||||
|
||||
prompt = "A photo of sks dog in a bucket"
|
||||
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
|
||||
|
||||
image.save("dog-bucket.png")
|
||||
```
|
||||
|
||||
[저장된 학습 체크포인트](#inference-from-a-saved-checkpoint)에서도 추론을 실행할 수 있습니다.
|
||||
128
docs/source/ko/training/lora.mdx
Normal file
@@ -0,0 +1,128 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Low-Rank Adaptation of Large Language Models (LoRA)
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
현재 LoRA는 [`UNet2DConditionModel`]의 어텐션 레이어에서만 지원됩니다.
|
||||
|
||||
</Tip>
|
||||
|
||||
[LoRA(Low-Rank Adaptation of Large Language Models)](https://arxiv.org/abs/2106.09685)는 메모리를 적게 사용하면서 대규모 모델의 학습을 가속화하는 학습 방법입니다. 기존 가중치에 rank-decomposition 가중치 행렬 쌍(**업데이트 행렬**이라고 함)을 추가하고, 새로 추가된 가중치**만** 학습합니다(아래 목록 다음의 간단한 스케치를 참고하세요). 여기에는 몇 가지 장점이 있습니다.
|
||||
|
||||
- 이전에 사전 학습된 가중치는 고정된 상태로 유지되므로, 모델이 [치명적인 망각](https://www.pnas.org/doi/10.1073/pnas.1611835114)에 빠질 가능성이 적습니다.
|
||||
- Rank-decomposition 행렬은 원래 모델보다 파라미터 수가 훨씬 적으므로, 학습된 LoRA 가중치를 쉽게 이식하고 공유할 수 있습니다.
|
||||
- LoRA 매트릭스는 일반적으로 원본 모델의 어텐션 레이어에 추가됩니다. 🧨 Diffusers는 [`~diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs`] 메서드를 제공하여 LoRA 가중치를 모델의 어텐션 레이어로 불러옵니다. `scale` 매개변수를 통해 모델이 새로운 학습 이미지에 맞게 조정되는 범위를 제어할 수 있습니다.
|
||||
- 메모리 효율성이 향상되어 Tesla T4, RTX 3080 또는 RTX 2080 Ti와 같은 소비자용 GPU에서 파인튜닝을 실행할 수 있습니다! T4와 같은 GPU는 무료이며 Kaggle 또는 Google Colab 노트북에서 쉽게 액세스할 수 있습니다.
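다음은 업데이트 행렬이 어떻게 동작하는지 감을 잡기 위해 NumPy로 작성한 아주 단순화된 개념 스케치입니다. 실제 🧨 Diffusers 구현이 아니라, 크기와 변수 이름을 임의로 가정한 설명용 코드입니다:

```python
import numpy as np

d, k, r = 768, 768, 4  # 원래 가중치 크기와 LoRA rank (r << d)
W = np.random.randn(d, k)          # 고정된(frozen) 사전 학습 가중치
A = np.random.randn(r, k) * 0.01   # 학습되는 업데이트 행렬 1
B = np.zeros((d, r))               # 학습되는 업데이트 행렬 2 (실제 LoRA처럼 0으로 초기화)

scale = 0.5  # LoRA 업데이트가 반영되는 정도를 조절하는 값

# 추론 시의 유효 가중치: 원래 가중치 + scale * (B @ A)
W_eff = W + scale * (B @ A)

x = np.random.randn(k)
y = W_eff @ x
print(y.shape)  # (768,)
```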
|
||||
|
||||
|
||||
<Tip>
|
||||
|
||||
💡 LoRA는 어텐션 레이어에만 한정되지 않습니다. 저자들은 언어 모델의 어텐션 레이어를 수정하는 것만으로도 매우 효율적으로 좋은 성능을 얻기에 충분하다는 것을 발견했습니다. 이것이 LoRA 가중치를 모델의 어텐션 레이어에 추가하는 것이 일반적인 이유입니다. LoRA 작동 방식에 대한 자세한 내용은 [Using LoRA for effective Stable Diffusion fine-tuning](https://huggingface.co/blog/lora) 블로그를 확인하세요!
|
||||
|
||||
</Tip>
|
||||
|
||||
[cloneofsimo](https://github.com/cloneofsimo)는 인기 있는 [lora](https://github.com/cloneofsimo/lora) GitHub 리포지토리에서 Stable Diffusion을 위한 LoRA 학습을 최초로 시도했습니다. 🧨 Diffusers는 [text-to-image 생성](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image#training-with-lora) 및 [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth#training-with-low-rank-adaptation-of-large-language-models-lora)을 지원합니다. 이 가이드는 두 가지를 모두 수행하는 방법을 보여줍니다.
|
||||
|
||||
모델을 저장하거나 커뮤니티와 공유하려면 Hugging Face 계정에 로그인하세요(아직 계정이 없는 경우 [생성](https://hf.co/join)하세요):
|
||||
|
||||
```bash
|
||||
huggingface-cli login
|
||||
```
|
||||
|
||||
## Text-to-image
|
||||
|
||||
수십억 개의 파라미터가 있는 Stable Diffusion과 같은 모델을 파인튜닝하는 것은 느리고 어려울 수 있습니다. LoRA를 사용하면 diffusion 모델을 훨씬 쉽고 빠르게 파인튜닝할 수 있으며, 8비트 옵티마이저와 같은 트릭에 의존하지 않고도 11GB의 GPU RAM만 있는 하드웨어에서 실행할 수 있습니다.
|
||||
|
||||
|
||||
### 학습 [[text-to-image 학습]]
|
||||
|
||||
[Pokémon BLIP 캡션](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) 데이터셋으로 [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)를 파인튜닝해 나만의 포켓몬을 생성해 보겠습니다.
|
||||
|
||||
시작하려면 `MODEL_NAME` 및 `DATASET_NAME` 환경 변수가 설정되어 있는지 확인하십시오. `OUTPUT_DIR` 및 `HUB_MODEL_ID` 변수는 선택 사항이며 허브에서 모델을 저장할 위치를 지정합니다.
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
|
||||
export OUTPUT_DIR="/sddata/finetune/lora/pokemon"
|
||||
export HUB_MODEL_ID="pokemon-lora"
|
||||
export DATASET_NAME="lambdalabs/pokemon-blip-captions"
|
||||
```
|
||||
|
||||
학습을 시작하기 전에 알아야 할 몇 가지 플래그가 있습니다.
|
||||
|
||||
* `--push_to_hub`를 명시하면 학습된 LoRA 임베딩을 허브에 저장합니다.
|
||||
* `--report_to=wandb`는 학습 결과를 Weights & Biases 대시보드에 보고하고 기록합니다(예시로 이 [보고서](https://wandb.ai/pcuenq/text2image-fine-tune/run/b4k1w0tn?workspace=user-pcuenq)를 참조하세요).
|
||||
* `--learning_rate=1e-04`, 일반적으로 LoRA에서 사용하는 것보다 더 높은 학습률을 사용할 수 있습니다.
|
||||
|
||||
이제 학습을 시작할 준비가 되었습니다 (전체 학습 스크립트는 [여기](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py)에서 찾을 수 있습니다).
|
||||
|
||||
```bash
|
||||
accelerate launch train_text_to_image_lora.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--dataset_name=$DATASET_NAME \
--output_dir=$OUTPUT_DIR \
--hub_model_id=$HUB_MODEL_ID \
--resolution=512 \
--train_batch_size=1 \
--gradient_accumulation_steps=1 \
--checkpointing_steps=100 \
--learning_rate=1e-4 \
--report_to="wandb" \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--max_train_steps=500 \
--validation_prompt="A pokemon with blue eyes." \
--validation_epochs=50 \
--seed="0" \
--push_to_hub
|
||||
```
|
||||
|
||||
### 추론 [[dreambooth 추론]]
|
||||
|
||||
이제 [`StableDiffusionPipeline`]에서 기본 모델을 불러와 추론을 위해 모델을 사용할 수 있습니다:
|
||||
|
||||
```py
|
||||
>>> import torch
|
||||
>>> from diffusers import StableDiffusionPipeline
|
||||
|
||||
>>> model_base = "runwayml/stable-diffusion-v1-5"
|
||||
|
||||
>>> pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16)
|
||||
```
|
||||
|
||||
*기본 모델의 가중치 위에* 파인튜닝된 DreamBooth 모델에서 LoRA 가중치를 불러온 다음, 더 빠른 추론을 위해 파이프라인을 GPU로 이동합니다. LoRA 가중치를 프리징된 사전 학습 모델 가중치와 병합할 때, `scale` 매개변수로 어느 정도의 가중치를 병합할지 선택적으로 조절할 수 있습니다:
|
||||
|
||||
<Tip>
|
||||
|
||||
💡 `0`의 `scale` 값은 LoRA 가중치를 사용하지 않아 원래 모델의 가중치만 사용한 것과 같고, `1`의 `scale` 값은 파인튜닝된 LoRA 가중치만 사용함을 의미합니다. 0과 1 사이의 값들은 두 결과들 사이로 보간됩니다.
|
||||
|
||||
</Tip>
|
||||
|
||||
```py
|
||||
>>> model_path = "path_to_saved_model"  # LoRA 가중치가 저장된 경로 또는 허브 모델 ID (예시용 자리표시자)
>>> pipe.unet.load_attn_procs(model_path)
|
||||
>>> pipe.to("cuda")
|
||||
# LoRA 파인튜닝된 모델의 가중치 절반과 기본 모델의 가중치 절반 사용
|
||||
|
||||
>>> image = pipe(
|
||||
... "A picture of a sks dog in a bucket.",
|
||||
... num_inference_steps=25,
|
||||
... guidance_scale=7.5,
|
||||
... cross_attention_kwargs={"scale": 0.5},
|
||||
... ).images[0]
|
||||
# 완전히 파인튜닝된 LoRA 모델의 가중치 사용
|
||||
|
||||
>>> image = pipe("A picture of a sks dog in a bucket.", num_inference_steps=25, guidance_scale=7.5).images[0]
|
||||
>>> image.save("bucket-dog.png")
|
||||
```
|
||||
224
docs/source/ko/training/text2image.mdx
Normal file
@@ -0,0 +1,224 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
|
||||
# Text-to-image
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
text-to-image 파인튜닝 스크립트는 experimental 상태입니다. 과적합하기 쉽고 치명적인 망각과 같은 문제에 부딪히기 쉽습니다. 자체 데이터셋에서 최상의 결과를 얻으려면 다양한 하이퍼파라미터를 탐색하는 것이 좋습니다.
|
||||
|
||||
</Tip>
|
||||
|
||||
Stable Diffusion과 같은 text-to-image 모델은 텍스트 프롬프트에서 이미지를 생성합니다. 이 가이드는 PyTorch 및 Flax를 사용하여 자체 데이터셋에서 [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) 모델로 파인튜닝하는 방법을 보여줍니다. 이 가이드에 사용된 text-to-image 파인튜닝을 위한 모든 학습 스크립트에 관심이 있는 경우 이 [리포지토리](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image)에서 자세히 찾을 수 있습니다.
|
||||
|
||||
스크립트를 실행하기 전에, 라이브러리의 학습 dependency들을 설치해야 합니다:
|
||||
|
||||
```bash
|
||||
pip install git+https://github.com/huggingface/diffusers.git
|
||||
pip install -U -r requirements.txt
|
||||
```
|
||||
|
||||
그리고 [🤗Accelerate](https://github.com/huggingface/accelerate/) 환경을 초기화합니다:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
리포지토리를 이미 복제한 경우, 이 단계를 수행할 필요가 없습니다. 대신, 로컬 체크아웃 경로를 학습 스크립트에 명시할 수 있으며 거기에서 로드됩니다.
|
||||
|
||||
### 하드웨어 요구 사항
|
||||
|
||||
`gradient_checkpointing` 및 `mixed_precision`을 사용하면 단일 24GB GPU에서 모델을 파인튜닝할 수 있습니다. 더 높은 `batch_size`와 더 빠른 훈련을 위해서는 GPU 메모리가 30GB 이상인 GPU를 사용하는 것이 좋습니다. TPU 또는 GPU에서 파인튜닝을 위해 JAX나 Flax를 사용할 수도 있습니다. 자세한 내용은 [아래](#flax-jax-finetuning)를 참조하세요.
|
||||
|
||||
xFormers로 memory efficient attention을 활성화하여 메모리 사용량을 훨씬 더 줄일 수 있습니다. [xFormers가 설치](../optimization/xformers)되어 있는지 확인하고 `--enable_xformers_memory_efficient_attention`을 학습 스크립트에 명시합니다.
|
||||
|
||||
xFormers는 Flax에 사용할 수 없습니다.
|
||||
|
||||
## Hub에 모델 업로드하기
|
||||
|
||||
학습 스크립트에 다음 인수를 추가하여 모델을 허브에 저장합니다:
|
||||
|
||||
```bash
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
|
||||
## 체크포인트 저장 및 불러오기
|
||||
|
||||
학습 중 발생할 수 있는 일에 대비하여 정기적으로 체크포인트를 저장해 두는 것이 좋습니다. 체크포인트를 저장하려면 학습 스크립트에 다음 인수를 명시합니다.
|
||||
|
||||
```bash
|
||||
--checkpointing_steps=500
|
||||
```
|
||||
|
||||
500 스텝마다 전체 학습 state가 `output_dir`의 하위 폴더에 저장됩니다. 체크포인트 하위 폴더 이름은 `checkpoint-` 뒤에 지금까지 학습된 step 수가 붙는 형식입니다. 예를 들어 `checkpoint-1500`은 1500 학습 step 후에 저장된 체크포인트입니다.
|
||||
|
||||
학습을 재개하기 위해 체크포인트를 불러오려면 `--resume_from_checkpoint` 인수를 학습 스크립트에 명시하고 재개할 체크포인트를 지정하세요. 예를 들어 다음 인수는 1500 학습 step 후에 저장된 체크포인트에서부터 학습을 재개합니다.
|
||||
|
||||
```bash
|
||||
--resume_from_checkpoint="checkpoint-1500"
|
||||
```
|
||||
|
||||
## 파인튜닝
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
다음과 같이 [Pokémon BLIP 캡션](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) 데이터셋에서 파인튜닝 실행을 위해 [PyTorch 학습 스크립트](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py)를 실행합니다:
|
||||
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
||||
export dataset_name="lambdalabs/pokemon-blip-captions"
|
||||
|
||||
accelerate launch train_text_to_image.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--dataset_name=$dataset_name \
|
||||
--use_ema \
|
||||
--resolution=512 --center_crop --random_flip \
|
||||
--train_batch_size=1 \
|
||||
--gradient_accumulation_steps=4 \
|
||||
--gradient_checkpointing \
|
||||
--mixed_precision="fp16" \
|
||||
--max_train_steps=15000 \
|
||||
--learning_rate=1e-05 \
|
||||
--max_grad_norm=1 \
|
||||
--lr_scheduler="constant" --lr_warmup_steps=0 \
|
||||
--output_dir="sd-pokemon-model"
|
||||
```
|
||||
|
||||
자체 데이터셋으로 파인튜닝하려면 🤗 [Datasets](https://huggingface.co/docs/datasets/index)에서 요구하는 형식에 따라 데이터셋을 준비하세요. [데이터셋을 허브에 업로드](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub)하거나 [파일들이 있는 로컬 폴더를 준비](https://huggingface.co/docs/datasets/image_dataset#imagefolder)할 수 있습니다.
|
||||
|
||||
사용자 커스텀 loading logic을 사용하려면 스크립트를 수정하세요. 도움이 되도록 코드의 적절한 위치에 포인터를 남겨두었습니다. 🤗 아래 예제 스크립트는 `TRAIN_DIR`의 로컬 데이터셋으로 파인튜닝하는 방법과 `OUTPUT_DIR`에 모델을 저장하는 위치를 지정하는 방법을 보여줍니다:
|
||||
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
||||
export TRAIN_DIR="path_to_your_dataset"
|
||||
export OUTPUT_DIR="path_to_save_model"
|
||||
|
||||
accelerate launch train_text_to_image.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--train_data_dir=$TRAIN_DIR \
|
||||
--use_ema \
|
||||
--resolution=512 --center_crop --random_flip \
|
||||
--train_batch_size=1 \
|
||||
--gradient_accumulation_steps=4 \
|
||||
--gradient_checkpointing \
|
||||
--mixed_precision="fp16" \
|
||||
--max_train_steps=15000 \
|
||||
--learning_rate=1e-05 \
|
||||
--max_grad_norm=1 \
|
||||
--lr_scheduler="constant" --lr_warmup_steps=0 \
|
||||
--output_dir=${OUTPUT_DIR}
|
||||
```
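참고로, 위에서 설명한 로컬 폴더(ImageFolder) 형식이 올바르게 준비되었는지는 🤗 Datasets로 직접 불러보며 확인할 수 있습니다. 아래는 `TRAIN_DIR`에 이미지 파일들과 캡션이 담긴 `metadata.jsonl`이 있다고 가정한 간단한 예시입니다:

```python
from datasets import load_dataset

# path_to_your_dataset 하위에 이미지 파일들과 (선택적으로) metadata.jsonl이 있다고 가정합니다.
dataset = load_dataset("imagefolder", data_dir="path_to_your_dataset", split="train")

print(dataset)            # 데이터셋 크기와 컬럼 확인
print(dataset[0].keys())  # 예: dict_keys(['image', 'text'])
```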
|
||||
|
||||
</pt>
|
||||
<jax>
|
||||
[@duongna21](https://github.com/duongna21)의 기여로, Flax를 사용해 TPU 및 GPU에서 Stable Diffusion 모델을 더 빠르게 학습할 수 있습니다. 이는 TPU 하드웨어에서 매우 효율적이지만 GPU에서도 훌륭하게 작동합니다. Flax 학습 스크립트는 gradient checkpointing이나 gradient accumulation과 같은 기능을 아직 지원하지 않으므로, 메모리가 30GB 이상인 GPU 또는 TPU v3가 필요합니다.
|
||||
|
||||
스크립트를 실행하기 전에 요구 사항이 설치되어 있는지 확인하십시오:
|
||||
|
||||
```bash
|
||||
pip install -U -r requirements_flax.txt
|
||||
```
|
||||
|
||||
그러면 다음과 같이 [Flax 학습 스크립트](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_flax.py)를 실행할 수 있습니다.
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
|
||||
export dataset_name="lambdalabs/pokemon-blip-captions"
|
||||
|
||||
python train_text_to_image_flax.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--dataset_name=$dataset_name \
|
||||
--resolution=512 --center_crop --random_flip \
|
||||
--train_batch_size=1 \
|
||||
--max_train_steps=15000 \
|
||||
--learning_rate=1e-05 \
|
||||
--max_grad_norm=1 \
|
||||
--output_dir="sd-pokemon-model"
|
||||
```
|
||||
|
||||
자체 데이터셋으로 파인튜닝하려면 🤗 [Datasets](https://huggingface.co/docs/datasets/index)에서 요구하는 형식에 따라 데이터셋을 준비하세요. [데이터셋을 허브에 업로드](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub)하거나 [파일들이 있는 로컬 폴더를 준비](https://huggingface.co/docs/datasets/image_dataset#imagefolder)할 수 있습니다.
|
||||
|
||||
사용자 커스텀 loading logic을 사용하려면 스크립트를 수정하세요. 도움이 되도록 코드의 적절한 위치에 포인터를 남겨두었습니다. 🤗 아래 예제 스크립트는 `TRAIN_DIR`의 로컬 데이터셋으로 파인튜닝하는 방법을 보여줍니다:
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
|
||||
export TRAIN_DIR="path_to_your_dataset"
|
||||
|
||||
python train_text_to_image_flax.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--train_data_dir=$TRAIN_DIR \
|
||||
--resolution=512 --center_crop --random_flip \
|
||||
--train_batch_size=1 \
|
||||
--mixed_precision="fp16" \
|
||||
--max_train_steps=15000 \
|
||||
--learning_rate=1e-05 \
|
||||
--max_grad_norm=1 \
|
||||
--output_dir="sd-pokemon-model"
|
||||
```
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
|
||||
## LoRA
|
||||
|
||||
Text-to-image 모델 파인튜닝을 위해, 대규모 모델 학습을 가속화하기 위한 파인튜닝 기술인 LoRA(Low-Rank Adaptation of Large Language Models)를 사용할 수 있습니다. 자세한 내용은 [LoRA 학습](lora#text-to-image) 가이드를 참조하세요.
|
||||
|
||||
## 추론
|
||||
|
||||
허브의 모델 경로 또는 모델 이름을 [`StableDiffusionPipeline`]에 전달하여 추론을 위해 파인 튜닝된 모델을 불러올 수 있습니다:
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```python
|
||||
import torch
from diffusers import StableDiffusionPipeline
|
||||
|
||||
model_path = "path_to_saved_model"
|
||||
pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
|
||||
pipe.to("cuda")
|
||||
|
||||
image = pipe(prompt="yoda").images[0]
|
||||
image.save("yoda-pokemon.png")
|
||||
```
|
||||
</pt>
|
||||
<jax>
|
||||
```python
|
||||
import jax
|
||||
import numpy as np
|
||||
from flax.jax_utils import replicate
|
||||
from flax.training.common_utils import shard
|
||||
from diffusers import FlaxStableDiffusionPipeline
|
||||
|
||||
model_path = "path_to_saved_model"
|
||||
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)
|
||||
|
||||
prompt = "yoda pokemon"
|
||||
prng_seed = jax.random.PRNGKey(0)
|
||||
num_inference_steps = 50
|
||||
|
||||
num_samples = jax.device_count()
|
||||
prompt = num_samples * [prompt]
|
||||
prompt_ids = pipeline.prepare_inputs(prompt)
|
||||
|
||||
# shard inputs and rng
|
||||
params = replicate(params)
|
||||
prng_seed = jax.random.split(prng_seed, jax.device_count())
|
||||
prompt_ids = shard(prompt_ids)
|
||||
|
||||
images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
|
||||
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
|
||||
images[0].save("yoda-pokemon.png")
|
||||
```
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
@@ -4,51 +4,79 @@
|
||||
- local: quicktour
|
||||
title: 快速入门
|
||||
- local: stable_diffusion
|
||||
title: Stable Diffusion
|
||||
title: Effective and efficient diffusion
|
||||
- local: installation
|
||||
title: 安装
|
||||
title: 开始
|
||||
- sections:
|
||||
- local: tutorials/tutorial_overview
|
||||
title: Overview
|
||||
- local: using-diffusers/write_own_pipeline
|
||||
title: Understanding models and schedulers
|
||||
- local: tutorials/basic_training
|
||||
title: Train a diffusion model
|
||||
title: Tutorials
|
||||
- sections:
|
||||
- sections:
|
||||
- local: using-diffusers/loading_overview
|
||||
title: Overview
|
||||
- local: using-diffusers/loading
|
||||
title: Loading Pipelines, Models, and Schedulers
|
||||
title: Load pipelines, models, and schedulers
|
||||
- local: using-diffusers/schedulers
|
||||
title: Using different Schedulers
|
||||
- local: using-diffusers/configuration
|
||||
title: Configuring Pipelines, Models, and Schedulers
|
||||
title: Load and compare different schedulers
|
||||
- local: using-diffusers/custom_pipeline_overview
|
||||
title: Loading and Adding Custom Pipelines
|
||||
title: Load community pipelines
|
||||
- local: using-diffusers/kerascv
|
||||
title: Using KerasCV Stable Diffusion Checkpoints in Diffusers
|
||||
title: Load KerasCV Stable Diffusion checkpoints
|
||||
title: Loading & Hub
|
||||
- sections:
|
||||
- local: using-diffusers/pipeline_overview
|
||||
title: Overview
|
||||
- local: using-diffusers/unconditional_image_generation
|
||||
title: Unconditional Image Generation
|
||||
title: Unconditional image generation
|
||||
- local: using-diffusers/conditional_image_generation
|
||||
title: Text-to-Image Generation
|
||||
title: Text-to-image generation
|
||||
- local: using-diffusers/img2img
|
||||
title: Text-Guided Image-to-Image
|
||||
title: Text-guided image-to-image
|
||||
- local: using-diffusers/inpaint
|
||||
title: Text-Guided Image-Inpainting
|
||||
title: Text-guided image-inpainting
|
||||
- local: using-diffusers/depth2img
|
||||
title: Text-Guided Depth-to-Image
|
||||
- local: using-diffusers/controlling_generation
|
||||
title: Controlling generation
|
||||
title: Text-guided depth-to-image
|
||||
- local: using-diffusers/reusing_seeds
|
||||
title: Reusing seeds for deterministic generation
|
||||
title: Improve image quality with deterministic generation
|
||||
- local: using-diffusers/reproducibility
|
||||
title: Reproducibility
|
||||
title: Create reproducible pipelines
|
||||
- local: using-diffusers/custom_pipeline_examples
|
||||
title: Community Pipelines
|
||||
title: Community pipelines
|
||||
- local: using-diffusers/contribute_pipeline
|
||||
title: How to contribute a Pipeline
|
||||
title: How to contribute a community pipeline
|
||||
- local: using-diffusers/using_safetensors
|
||||
title: Using safetensors
|
||||
- local: using-diffusers/stable_diffusion_jax_how_to
|
||||
title: Stable Diffusion in JAX/Flax
|
||||
- local: using-diffusers/weighted_prompts
|
||||
title: Weighting Prompts
|
||||
title: Pipelines for Inference
|
||||
- sections:
|
||||
- local: training/overview
|
||||
title: Overview
|
||||
- local: training/unconditional_training
|
||||
title: Unconditional image generation
|
||||
- local: training/text_inversion
|
||||
title: Textual Inversion
|
||||
- local: training/dreambooth
|
||||
title: DreamBooth
|
||||
- local: training/text2image
|
||||
title: Text-to-image
|
||||
- local: training/lora
|
||||
title: Low-Rank Adaptation of Large Language Models (LoRA)
|
||||
- local: training/controlnet
|
||||
title: ControlNet
|
||||
- local: training/instructpix2pix
|
||||
title: InstructPix2Pix Training
|
||||
- local: training/custom_diffusion
|
||||
title: Custom Diffusion
|
||||
title: Training
|
||||
- sections:
|
||||
- local: using-diffusers/rl
|
||||
title: Reinforcement Learning
|
||||
@@ -59,6 +87,8 @@
|
||||
title: Taking Diffusers Beyond Images
|
||||
title: Using Diffusers
|
||||
- sections:
|
||||
- local: optimization/opt_overview
|
||||
title: Overview
|
||||
- local: optimization/fp16
|
||||
title: Memory and Speed
|
||||
- local: optimization/torch2.0
|
||||
@@ -69,32 +99,26 @@
|
||||
title: ONNX
|
||||
- local: optimization/open_vino
|
||||
title: OpenVINO
|
||||
- local: optimization/coreml
|
||||
title: Core ML
|
||||
- local: optimization/mps
|
||||
title: MPS
|
||||
- local: optimization/habana
|
||||
title: Habana Gaudi
|
||||
- local: optimization/tome
|
||||
title: Token Merging
|
||||
title: Optimization/Special Hardware
|
||||
- sections:
|
||||
- local: training/overview
|
||||
title: Overview
|
||||
- local: training/unconditional_training
|
||||
title: Unconditional Image Generation
|
||||
- local: training/text_inversion
|
||||
title: Textual Inversion
|
||||
- local: training/dreambooth
|
||||
title: DreamBooth
|
||||
- local: training/text2image
|
||||
title: Text-to-image
|
||||
- local: training/lora
|
||||
title: Low-Rank Adaptation of Large Language Models (LoRA)
|
||||
title: Training
|
||||
- sections:
|
||||
- local: conceptual/philosophy
|
||||
title: Philosophy
|
||||
- local: using-diffusers/controlling_generation
|
||||
title: Controlled generation
|
||||
- local: conceptual/contribution
|
||||
title: How to contribute?
|
||||
- local: conceptual/ethical_guidelines
|
||||
title: Diffusers' Ethical Guidelines
|
||||
- local: conceptual/evaluation
|
||||
title: Evaluating Diffusion Models
|
||||
title: Conceptual Guides
|
||||
- sections:
|
||||
- sections:
|
||||
@@ -118,6 +142,8 @@
|
||||
title: AltDiffusion
|
||||
- local: api/pipelines/audio_diffusion
|
||||
title: Audio Diffusion
|
||||
- local: api/pipelines/audioldm
|
||||
title: AudioLDM
|
||||
- local: api/pipelines/cycle_diffusion
|
||||
title: Cycle Diffusion
|
||||
- local: api/pipelines/dance_diffusion
|
||||
@@ -128,6 +154,8 @@
|
||||
title: DDPM
|
||||
- local: api/pipelines/dit
|
||||
title: DiT
|
||||
- local: api/pipelines/if
|
||||
title: IF
|
||||
- local: api/pipelines/latent_diffusion
|
||||
title: Latent Diffusion
|
||||
- local: api/pipelines/paint_by_example
|
||||
@@ -142,6 +170,8 @@
|
||||
title: Score SDE VE
|
||||
- local: api/pipelines/semantic_stable_diffusion
|
||||
title: Semantic Guidance
|
||||
- local: api/pipelines/spectrogram_diffusion
|
||||
title: "Spectrogram Diffusion"
|
||||
- sections:
|
||||
- local: api/pipelines/stable_diffusion/overview
|
||||
title: Overview
|
||||
@@ -171,6 +201,8 @@
|
||||
title: MultiDiffusion Panorama
|
||||
- local: api/pipelines/stable_diffusion/controlnet
|
||||
title: Text-to-Image Generation with ControlNet Conditioning
|
||||
- local: api/pipelines/stable_diffusion/model_editing
|
||||
title: Text-to-Image Model Editing
|
||||
title: Stable Diffusion
|
||||
- local: api/pipelines/stable_diffusion_2
|
||||
title: Stable Diffusion 2
|
||||
@@ -178,6 +210,10 @@
|
||||
title: Stable unCLIP
|
||||
- local: api/pipelines/stochastic_karras_ve
|
||||
title: Stochastic Karras VE
|
||||
- local: api/pipelines/text_to_video
|
||||
title: Text-to-Video
|
||||
- local: api/pipelines/text_to_video_zero
|
||||
title: Text-to-Video Zero
|
||||
- local: api/pipelines/unclip
|
||||
title: UnCLIP
|
||||
- local: api/pipelines/latent_diffusion_uncond
|
||||
@@ -235,4 +271,4 @@
|
||||
- local: api/experimental/rl
|
||||
title: RL Planning
|
||||
title: Experimental Features
|
||||
title: API
|
||||
title: API
|
||||
@@ -18,61 +18,84 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# 🧨 Diffusers
|
||||
|
||||
🤗Diffusers提供了预训练好的视觉和音频扩散模型,并可以作为推理和训练的模块化工具箱。
|
||||
🤗 Diffusers 是一个值得首选用于生成图像、音频甚至 3D 分子结构的,最先进的预训练扩散模型库。
|
||||
无论您是在寻找简单的推理解决方案,还是想训练自己的扩散模型,🤗 Diffusers 这一模块化工具箱都能对其提供支持。
|
||||
本库的设计更偏重于[可用而非高性能](conceptual/philosophy#usability-over-performance)、[简明而非简单](conceptual/philosophy#simple-over-easy)以及[易用而非抽象](conceptual/philosophy#tweakable-contributorfriendly-over-abstraction)。
|
||||
|
||||
更准确地说,🤗Diffusers提供了:
|
||||
|
||||
- 最先进的扩散管道,可以在推理中仅用几行代码运行(详情看[**Using Diffusers**](./using-diffusers/conditional_image_generation))或看[**管道**](#pipelines) 以获取所有支持的管道及其对应的论文的概述。
|
||||
- 可以在推理中交替使用的各种噪声调度程序,以便在推理过程中权衡如何选择速度和质量。有关更多信息,可以看[**Schedulers**](./api/schedulers/overview)。
|
||||
- 多种类型的模型,如U-Net,可用作端到端扩散系统中的构建模块。有关更多详细信息,可以看 [**Models**](./api/models) 。
|
||||
- 训练示例,展示如何训练最流行的扩散模型任务。更多相关信息,可以看[**Training**](./training/overview)。
|
||||
本库包含三个主要组件:
|
||||
|
||||
- 最先进的扩散管道 [diffusion pipelines](api/pipelines/overview),只需几行代码即可进行推理。
|
||||
- 可交替使用的各种噪声调度器 [noise schedulers](api/schedulers/overview),用于平衡生成速度和质量。
|
||||
- 预训练模型 [models](api/models),可作为构建模块,并与调度程序结合使用,来创建您自己的端到端扩散系统。
|
||||
|
||||
<div class="mt-10">
|
||||
<div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
|
||||
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorials/tutorial_overview"
|
||||
><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div>
|
||||
<p class="text-gray-700">Learn the fundamental skills you need to start generating outputs, build your own diffusion system, and train a diffusion model. We recommend starting here if you're using 🤗 Diffusers for the first time!</p>
|
||||
</a>
|
||||
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./using-diffusers/loading_overview"
|
||||
><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
|
||||
<p class="text-gray-700">Practical guides for helping you load pipelines, models, and schedulers. You'll also learn how to use pipelines for specific tasks, control how outputs are generated, optimize for inference speed, and different training techniques.</p>
|
||||
</a>
|
||||
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual/philosophy"
|
||||
><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div>
|
||||
<p class="text-gray-700">Understand why the library was designed the way it was, and learn more about the ethical guidelines and safety implementations for using the library.</p>
|
||||
</a>
|
||||
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./api/models"
|
||||
><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
|
||||
<p class="text-gray-700">Technical descriptions of how 🤗 Diffusers classes and methods work.</p>
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
## 🧨 Diffusers pipelines
|
||||
|
||||
下表总结了所有官方支持的pipelines及其对应的论文,部分提供了colab,可以直接尝试一下。
|
||||
下表汇总了当前所有官方支持的pipelines及其对应的论文.
|
||||
|
||||
|
||||
| 管道 | 论文 | 任务 | Colab
|
||||
|---|---|:---:|:---:|
|
||||
| [alt_diffusion](./api/pipelines/alt_diffusion) | [**AltDiffusion**](https://arxiv.org/abs/2211.06679) | Image-to-Image Text-Guided Generation |
|
||||
| [audio_diffusion](./api/pipelines/audio_diffusion) | [**Audio Diffusion**](https://github.com/teticio/audio-diffusion.git) | Unconditional Audio Generation | [](https://colab.research.google.com/github/teticio/audio-diffusion/blob/master/notebooks/audio_diffusion_pipeline.ipynb)
|
||||
| [controlnet](./api/pipelines/stable_diffusion/controlnet) | [**ControlNet with Stable Diffusion**](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb)
|
||||
| [cycle_diffusion](./api/pipelines/cycle_diffusion) | [**Cycle Diffusion**](https://arxiv.org/abs/2210.05559) | Image-to-Image Text-Guided Generation |
|
||||
| [dance_diffusion](./api/pipelines/dance_diffusion) | [**Dance Diffusion**](https://github.com/williamberman/diffusers.git) | Unconditional Audio Generation |
|
||||
| [ddpm](./api/pipelines/ddpm) | [**Denoising Diffusion Probabilistic Models**](https://arxiv.org/abs/2006.11239) | Unconditional Image Generation |
|
||||
| [ddim](./api/pipelines/ddim) | [**Denoising Diffusion Implicit Models**](https://arxiv.org/abs/2010.02502) | Unconditional Image Generation |
|
||||
| [latent_diffusion](./api/pipelines/latent_diffusion) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752)| Text-to-Image Generation |
|
||||
| [latent_diffusion](./api/pipelines/latent_diffusion) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752)| Super Resolution Image-to-Image |
|
||||
| [latent_diffusion_uncond](./api/pipelines/latent_diffusion_uncond) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752) | Unconditional Image Generation |
|
||||
| [paint_by_example](./api/pipelines/paint_by_example) | [**Paint by Example: Exemplar-based Image Editing with Diffusion Models**](https://arxiv.org/abs/2211.13227) | Image-Guided Image Inpainting |
|
||||
| [pndm](./api/pipelines/pndm) | [**Pseudo Numerical Methods for Diffusion Models on Manifolds**](https://arxiv.org/abs/2202.09778) | Unconditional Image Generation |
|
||||
| [score_sde_ve](./api/pipelines/score_sde_ve) | [**Score-Based Generative Modeling through Stochastic Differential Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
|
||||
| [score_sde_vp](./api/pipelines/score_sde_vp) | [**Score-Based Generative Modeling through Stochastic Differential Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
|
||||
| [semantic_stable_diffusion](./api/pipelines/semantic_stable_diffusion) | [**Semantic Guidance**](https://arxiv.org/abs/2301.12247) | Text-Guided Generation | [](https://colab.research.google.com/github/ml-research/semantic-image-editing/blob/main/examples/SemanticGuidance.ipynb)
|
||||
| [stable_diffusion_text2img](./api/pipelines/stable_diffusion/text2img) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | Text-to-Image Generation | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb)
|
||||
| [stable_diffusion_img2img](./api/pipelines/stable_diffusion/img2img) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | Image-to-Image Text-Guided Generation | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb)
|
||||
| [stable_diffusion_inpaint](./api/pipelines/stable_diffusion/inpaint) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | Text-Guided Image Inpainting | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb)
|
||||
| [stable_diffusion_panorama](./api/pipelines/stable_diffusion/panorama) | [**MultiDiffusion**](https://multidiffusion.github.io/) | Text-to-Panorama Generation |
|
||||
| [stable_diffusion_pix2pix](./api/pipelines/stable_diffusion/pix2pix) | [**InstructPix2Pix**](https://github.com/timothybrooks/instruct-pix2pix) | Text-Guided Image Editing|
|
||||
| [stable_diffusion_pix2pix_zero](./api/pipelines/stable_diffusion/pix2pix_zero) | [**Zero-shot Image-to-Image Translation**](https://pix2pixzero.github.io/) | Text-Guided Image Editing |
|
||||
| [stable_diffusion_attend_and_excite](./api/pipelines/stable_diffusion/attend_and_excite) | [**Attend and Excite for Stable Diffusion**](https://attendandexcite.github.io/Attend-and-Excite/) | Text-to-Image Generation |
|
||||
| [stable_diffusion_self_attention_guidance](./api/pipelines/stable_diffusion/self_attention_guidance) | [**Self-Attention Guidance**](https://ku-cvlab.github.io/Self-Attention-Guidance) | Text-to-Image Generation |
|
||||
| [stable_diffusion_image_variation](./stable_diffusion/image_variation) | [**Stable Diffusion Image Variations**](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) | Image-to-Image Generation |
|
||||
| [stable_diffusion_latent_upscale](./stable_diffusion/latent_upscale) | [**Stable Diffusion Latent Upscaler**](https://twitter.com/StabilityAI/status/1590531958815064065) | Text-Guided Super Resolution Image-to-Image |
|
||||
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Stable Diffusion 2**](https://stability.ai/blog/stable-diffusion-v2-release) | Text-to-Image Generation |
|
||||
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Stable Diffusion 2**](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Image Inpainting |
|
||||
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Depth-Conditional Stable Diffusion**](https://github.com/Stability-AI/stablediffusion#depth-conditional-stable-diffusion) | Depth-to-Image Generation |
|
||||
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Stable Diffusion 2**](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Super Resolution Image-to-Image |
|
||||
| [stable_diffusion_safe](./api/pipelines/stable_diffusion_safe) | [**Safe Stable Diffusion**](https://arxiv.org/abs/2211.05105) | Text-Guided Generation | [](https://colab.research.google.com/github/ml-research/safe-latent-diffusion/blob/main/examples/Safe%20Latent%20Diffusion.ipynb)
|
||||
| [stable_unclip](./stable_unclip) | **Stable unCLIP** | Text-to-Image Generation |
|
||||
| [stable_unclip](./stable_unclip) | **Stable unCLIP** | Image-to-Image Text-Guided Generation |
|
||||
| [stochastic_karras_ve](./api/pipelines/stochastic_karras_ve) | [**Elucidating the Design Space of Diffusion-Based Generative Models**](https://arxiv.org/abs/2206.00364) | Unconditional Image Generation |
|
||||
| [unclip](./api/pipelines/unclip) | [Hierarchical Text-Conditional Image Generation with CLIP Latents](https://arxiv.org/abs/2204.06125) | Text-to-Image Generation |
|
||||
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Text-to-Image Generation |
|
||||
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Image Variations Generation |
|
||||
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Dual Image and Text Guided Generation |
|
||||
| [vq_diffusion](./api/pipelines/vq_diffusion) | [Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://arxiv.org/abs/2111.14822) | Text-to-Image Generation |
|
||||
|
||||
|
||||
**注意**: 管道是如何使用相应论文中提出的扩散模型的简单示例。
|
||||
| 管道 | 论文/仓库 | 任务 |
|
||||
|---|---|:---:|
|
||||
| [alt_diffusion](./api/pipelines/alt_diffusion) | [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) | Image-to-Image Text-Guided Generation |
|
||||
| [audio_diffusion](./api/pipelines/audio_diffusion) | [Audio Diffusion](https://github.com/teticio/audio-diffusion.git) | Unconditional Audio Generation |
|
||||
| [controlnet](./api/pipelines/stable_diffusion/controlnet) | [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation |
|
||||
| [cycle_diffusion](./api/pipelines/cycle_diffusion) | [Unifying Diffusion Models' Latent Space, with Applications to CycleDiffusion and Guidance](https://arxiv.org/abs/2210.05559) | Image-to-Image Text-Guided Generation |
|
||||
| [dance_diffusion](./api/pipelines/dance_diffusion) | [Dance Diffusion](https://github.com/williamberman/diffusers.git) | Unconditional Audio Generation |
|
||||
| [ddpm](./api/pipelines/ddpm) | [Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239) | Unconditional Image Generation |
|
||||
| [ddim](./api/pipelines/ddim) | [Denoising Diffusion Implicit Models](https://arxiv.org/abs/2010.02502) | Unconditional Image Generation |
|
||||
| [if](./if) | [**IF**](./api/pipelines/if) | Image Generation |
|
||||
| [if_img2img](./if) | [**IF**](./api/pipelines/if) | Image-to-Image Generation |
|
||||
| [if_inpainting](./if) | [**IF**](./api/pipelines/if) | Text-Guided Image Inpainting |
|
||||
| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)| Text-to-Image Generation |
|
||||
| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)| Super Resolution Image-to-Image |
|
||||
| [latent_diffusion_uncond](./api/pipelines/latent_diffusion_uncond) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) | Unconditional Image Generation |
|
||||
| [paint_by_example](./api/pipelines/paint_by_example) | [Paint by Example: Exemplar-based Image Editing with Diffusion Models](https://arxiv.org/abs/2211.13227) | Image-Guided Image Inpainting |
|
||||
| [pndm](./api/pipelines/pndm) | [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778) | Unconditional Image Generation |
|
||||
| [score_sde_ve](./api/pipelines/score_sde_ve) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
|
||||
| [score_sde_vp](./api/pipelines/score_sde_vp) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
|
||||
| [semantic_stable_diffusion](./api/pipelines/semantic_stable_diffusion) | [Semantic Guidance](https://arxiv.org/abs/2301.12247) | Text-Guided Generation |
|
||||
| [stable_diffusion_text2img](./api/pipelines/stable_diffusion/text2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-to-Image Generation |
|
||||
| [stable_diffusion_img2img](./api/pipelines/stable_diffusion/img2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Image-to-Image Text-Guided Generation |
|
||||
| [stable_diffusion_inpaint](./api/pipelines/stable_diffusion/inpaint) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-Guided Image Inpainting |
|
||||
| [stable_diffusion_panorama](./api/pipelines/stable_diffusion/panorama) | [MultiDiffusion](https://multidiffusion.github.io/) | Text-to-Panorama Generation |
|
||||
| [stable_diffusion_pix2pix](./api/pipelines/stable_diffusion/pix2pix) | [InstructPix2Pix: Learning to Follow Image Editing Instructions](https://arxiv.org/abs/2211.09800) | Text-Guided Image Editing|
|
||||
| [stable_diffusion_pix2pix_zero](./api/pipelines/stable_diffusion/pix2pix_zero) | [Zero-shot Image-to-Image Translation](https://pix2pixzero.github.io/) | Text-Guided Image Editing |
|
||||
| [stable_diffusion_attend_and_excite](./api/pipelines/stable_diffusion/attend_and_excite) | [Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models](https://arxiv.org/abs/2301.13826) | Text-to-Image Generation |
|
||||
| [stable_diffusion_self_attention_guidance](./api/pipelines/stable_diffusion/self_attention_guidance) | [Improving Sample Quality of Diffusion Models Using Self-Attention Guidance](https://arxiv.org/abs/2210.00939) | Text-to-Image Generation, Unconditional Image Generation |
|
||||
| [stable_diffusion_image_variation](./stable_diffusion/image_variation) | [Stable Diffusion Image Variations](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) | Image-to-Image Generation |
|
||||
| [stable_diffusion_latent_upscale](./stable_diffusion/latent_upscale) | [Stable Diffusion Latent Upscaler](https://twitter.com/StabilityAI/status/1590531958815064065) | Text-Guided Super Resolution Image-to-Image |
|
||||
| [stable_diffusion_model_editing](./api/pipelines/stable_diffusion/model_editing) | [Editing Implicit Assumptions in Text-to-Image Diffusion Models](https://time-diffusion.github.io/) | Text-to-Image Model Editing |
|
||||
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-to-Image Generation |
|
||||
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Image Inpainting |
|
||||
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Depth-Conditional Stable Diffusion](https://github.com/Stability-AI/stablediffusion#depth-conditional-stable-diffusion) | Depth-to-Image Generation |
|
||||
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Super Resolution Image-to-Image |
|
||||
| [stable_diffusion_safe](./api/pipelines/stable_diffusion_safe) | [Safe Stable Diffusion](https://arxiv.org/abs/2211.05105) | Text-Guided Generation |
|
||||
| [stable_unclip](./stable_unclip) | Stable unCLIP | Text-to-Image Generation |
|
||||
| [stable_unclip](./stable_unclip) | Stable unCLIP | Image-to-Image Text-Guided Generation |
|
||||
| [stochastic_karras_ve](./api/pipelines/stochastic_karras_ve) | [Elucidating the Design Space of Diffusion-Based Generative Models](https://arxiv.org/abs/2206.00364) | Unconditional Image Generation |
|
||||
| [text_to_video_sd](./api/pipelines/text_to_video) | [Modelscope's Text-to-video-synthesis Model in Open Domain](https://modelscope.cn/models/damo/text-to-video-synthesis/summary) | Text-to-Video Generation |
|
||||
| [unclip](./api/pipelines/unclip) | [Hierarchical Text-Conditional Image Generation with CLIP Latents](https://arxiv.org/abs/2204.06125)(implementation by [kakaobrain](https://github.com/kakaobrain/karlo)) | Text-to-Image Generation |
|
||||
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Text-to-Image Generation |
|
||||
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Image Variations Generation |
|
||||
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Dual Image and Text Guided Generation |
|
||||
| [vq_diffusion](./api/pipelines/vq_diffusion) | [Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://arxiv.org/abs/2111.14822) | Text-to-Image Generation |
|
||||
|
||||
@@ -12,7 +12,7 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# 安装
|
||||
|
||||
安装🤗 Diffusers 到你正在使用的任何深度学习框架中。
|
||||
在你正在使用的任意深度学习框架中安装 🤗 Diffusers 。
|
||||
|
||||
🤗 Diffusers已在Python 3.7+、PyTorch 1.7.0+和Flax上进行了测试。按照下面的安装说明,针对你正在使用的深度学习框架进行安装:
|
||||
|
||||
@@ -21,11 +21,11 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
## 使用pip安装
|
||||
|
||||
你需要在[虚拟环境](https://docs.python.org/3/library/venv.html)中安装🤗 Diffusers 。
|
||||
你需要在[虚拟环境](https://docs.python.org/3/library/venv.html)中安装 🤗 Diffusers 。
|
||||
|
||||
如果你对 Python 虚拟环境不熟悉,可以看看这个[教程](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
|
||||
|
||||
使用虚拟环境你可以轻松管理不同的项目,避免了依赖项之间的兼容性问题。
|
||||
在虚拟环境中,你可以轻松管理不同的项目,避免依赖项之间的兼容性问题。
|
||||
|
||||
首先,在你的项目目录下创建一个虚拟环境:
|
||||
|
||||
@@ -39,7 +39,7 @@ python -m venv .env
|
||||
source .env/bin/activate
|
||||
```
|
||||
|
||||
现在你就可以安装 🤗 Diffusers了!使用下边这个命令:
|
||||
现在,你就可以安装 🤗 Diffusers了!使用下边这个命令:
|
||||
|
||||
**PyTorch**
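例如,一个最小的安装示例大致如下(按 PyPI 标准 extras 写法给出,仅作参考):

```bash
pip install diffusers["torch"]
```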
|
||||
|
||||
@@ -55,7 +55,7 @@ pip install diffusers["flax"]
|
||||
|
||||
## 从源代码安装
|
||||
|
||||
在从源代码安装 `diffusers` 之前,你先确定你已经安装了 `torch` 和 `accelerate`。
|
||||
在从源代码安装 `diffusers` 之前,确保你已经安装了 `torch` 和 `accelerate`。
|
||||
|
||||
`torch`的安装教程可以看 `torch` [文档](https://pytorch.org/get-started/locally/#start-locally).
|
||||
|
||||
@@ -65,17 +65,17 @@ pip install diffusers["flax"]
|
||||
pip install accelerate
|
||||
```
|
||||
|
||||
从源码安装 🤗 Diffusers 使用以下命令:
|
||||
从源码安装 🤗 Diffusers 需要使用以下命令:
|
||||
|
||||
```bash
|
||||
pip install git+https://github.com/huggingface/diffusers
|
||||
```
|
||||
|
||||
这个命令安装的是最新的 `main`版本,而不是最近的`stable`版。
|
||||
`main`是一直和最新进展保持一致的。比如,上次正式版发布了,有bug,新的正式版还没推出,但是`main`中可以看到这个bug被修复了。
|
||||
但是这也意味着 `main`版本并不总是稳定的。
|
||||
`main`是一直和最新进展保持一致的。比如,上次发布的正式版中有bug,在`main`中可以看到这个bug被修复了,但是新的正式版此时尚未推出。
|
||||
但是这也意味着 `main`版本不保证是稳定的。
|
||||
|
||||
我们努力保持`main`版本正常运行,大多数问题都能在几个小时或一天之内解决
|
||||
我们努力保持`main`版本正常运行,大多数问题都能在几个小时或一天之内解决
|
||||
|
||||
如果你遇到了问题,可以提 [Issue](https://github.com/huggingface/diffusers/issues/new/choose),这样我们就能更快修复问题了。
|
||||
|
||||
@@ -105,8 +105,8 @@ pip install -e ".[torch]"
|
||||
pip install -e ".[flax]"
|
||||
```
|
||||
|
||||
这些命令将连接你克隆的版本库和你的 Python 库路径。
|
||||
现在,除了正常的库路径外,Python 还会在你克隆的文件夹内寻找。
|
||||
这些命令将连接到你克隆的版本库和你的 Python 库路径。
|
||||
现在,不只是在通常的库路径,Python 还会在你克隆的文件夹内寻找包。
|
||||
例如,如果你的 Python 包通常安装在 `~/anaconda3/envs/main/lib/python3.7/site-packages/`,Python 也会搜索你克隆的文件夹 `~/diffusers/`。
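可以用下面这条命令快速确认导入的是克隆目录中的版本(仅为示意,假设仓库克隆在 `~/diffusers/`):

```bash
python -c "import diffusers; print(diffusers.__file__)"
# 若输出指向 ~/diffusers/src/diffusers/__init__.py,说明可编辑安装已生效
```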
|
||||
|
||||
<Tip warning={true}>
|
||||
@@ -116,32 +116,31 @@ pip install -e ".[flax]"
|
||||
</Tip>
|
||||
|
||||
|
||||
现在你可以用下面的命令轻松地将你克隆的🤗Diffusers仓库更新到最新版本。
|
||||
现在你可以用下面的命令轻松地将你克隆的 🤗 Diffusers 库更新到最新版本。
|
||||
|
||||
```bash
|
||||
cd ~/diffusers/
|
||||
git pull
|
||||
```
|
||||
|
||||
你的Python环境将在下次运行时找到`main`版本的🤗 Diffusers。
|
||||
你的Python环境将在下次运行时找到`main`版本的 🤗 Diffusers。
|
||||
|
||||
## 注意遥测日志
|
||||
## 注意 Telemetry 日志
|
||||
|
||||
我们的库会在使用`from_pretrained()`请求期间收集信息。这些数据包括Diffusers和PyTorch/Flax的版本,请求的模型或管道,以及预训练检查点的路径(如果它被托管在Hub上)。
|
||||
我们的库会在使用`from_pretrained()`请求期间收集 telemetry 信息。这些数据包括Diffusers和PyTorch/Flax的版本,请求的模型或管道类,以及预训练检查点的路径(如果它被托管在Hub上的话)。
|
||||
这些使用数据有助于我们调试问题并确定新功能的开发优先级。
|
||||
Telemetry 数据仅在从 HuggingFace Hub 中加载模型和管道时发送,而不会在本地使用期间收集。
|
||||
|
||||
这些使用数据有助于我们调试问题并优先考虑新功能。
|
||||
当从HuggingFace Hub加载模型和管道时才会发送遥测数据,并且在本地使用时不会收集数据。
|
||||
|
||||
我们知道并不是每个人都想分享这些的信息,我们尊重您的隐私,
|
||||
因此您可以通过在终端中设置“DISABLE_TELEMETRY”环境变量来禁用遥测数据的收集:
|
||||
我们知道,并不是每个人都想分享这些信息,我们尊重您的隐私,
|
||||
因此您可以通过在终端中设置 `DISABLE_TELEMETRY` 环境变量从而禁用 Telemetry 数据收集:
|
||||
|
||||
|
||||
在Linux/MacOS中:
|
||||
Linux/MacOS:
|
||||
```bash
|
||||
export DISABLE_TELEMETRY=YES
|
||||
```
|
||||
|
||||
在Windows中:
|
||||
Windows:
|
||||
```bash
|
||||
set DISABLE_TELEMETRY=YES
|
||||
```
|
||||
examples/community/README.md (566, Normal file → Executable file)
@@ -6,33 +6,38 @@
|
||||
Please have a look at the following table to get an overview of all community examples. Click on the **Code Example** to get a copy-and-paste ready code example that you can try out.
|
||||
If a community pipeline doesn't work as expected, please open an issue and ping the author on it.
|
||||
|
||||
| Example | Description | Code Example | Colab | Author |
|
||||
|:---------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------:|
|
||||
| CLIP Guided Stable Diffusion | Doing CLIP guidance for text to image generation with Stable Diffusion | [CLIP Guided Stable Diffusion](#clip-guided-stable-diffusion) | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/CLIP_Guided_Stable_diffusion_with_diffusers.ipynb) | [Suraj Patil](https://github.com/patil-suraj/) |
|
||||
| One Step U-Net (Dummy) | Example showcasing of how to use Community Pipelines (see https://github.com/huggingface/diffusers/issues/841) | [One Step U-Net](#one-step-unet) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
|
||||
| Stable Diffusion Interpolation | Interpolate the latent space of Stable Diffusion between different prompts/seeds | [Stable Diffusion Interpolation](#stable-diffusion-interpolation) | - | [Nate Raw](https://github.com/nateraw/) |
|
||||
| Stable Diffusion Mega | **One** Stable Diffusion Pipeline with all functionalities of [Text2Image](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py), [Image2Image](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py) and [Inpainting](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py) | [Stable Diffusion Mega](#stable-diffusion-mega) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
|
||||
| Long Prompt Weighting Stable Diffusion | **One** Stable Diffusion Pipeline without tokens length limit, and support parsing weighting in prompt. | [Long Prompt Weighting Stable Diffusion](#long-prompt-weighting-stable-diffusion) | - | [SkyTNT](https://github.com/SkyTNT) |
|
||||
| Speech to Image | Using automatic-speech-recognition to transcribe text and Stable Diffusion to generate images | [Speech to Image](#speech-to-image) | - | [Mikail Duzenli](https://github.com/MikailINTech)
|
||||
| Wild Card Stable Diffusion | Stable Diffusion Pipeline that supports prompts that contain wildcard terms (indicated by surrounding double underscores), with values instantiated randomly from a corresponding txt file or a dictionary of possible values | [Wildcard Stable Diffusion](#wildcard-stable-diffusion) | - | [Shyam Sudhakaran](https://github.com/shyamsn97) |
|
||||
| [Composable Stable Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/) | Stable Diffusion Pipeline that supports prompts that contain "|" in prompts (as an AND condition) and weights (separated by "|" as well) to positively / negatively weight prompts. | [Composable Stable Diffusion](#composable-stable-diffusion) | - | [Mark Rich](https://github.com/MarkRich) |
|
||||
| Seed Resizing Stable Diffusion| Stable Diffusion Pipeline that supports resizing an image and retaining the concepts of the 512 by 512 generation. | [Seed Resizing](#seed-resizing) | - | [Mark Rich](https://github.com/MarkRich) |
|
||||
| Imagic Stable Diffusion | Stable Diffusion Pipeline that enables writing a text prompt to edit an existing image| [Imagic Stable Diffusion](#imagic-stable-diffusion) | - | [Mark Rich](https://github.com/MarkRich) |
|
||||
| Multilingual Stable Diffusion| Stable Diffusion Pipeline that supports prompts in 50 different languages. | [Multilingual Stable Diffusion](#multilingual-stable-diffusion-pipeline) | - | [Juan Carlos Piñeros](https://github.com/juancopi81) |
|
||||
| Image to Image Inpainting Stable Diffusion | Stable Diffusion Pipeline that enables the overlaying of two images and subsequent inpainting| [Image to Image Inpainting Stable Diffusion](#image-to-image-inpainting-stable-diffusion) | - | [Alex McKinney](https://github.com/vvvm23) |
|
||||
| Text Based Inpainting Stable Diffusion | Stable Diffusion Inpainting Pipeline that enables passing a text prompt to generate the mask for inpainting| [Text Based Inpainting Stable Diffusion](#image-to-image-inpainting-stable-diffusion) | - | [Dhruv Karan](https://github.com/unography) |
|
||||
| Bit Diffusion | Diffusion on discrete data | [Bit Diffusion](#bit-diffusion) | - |[Stuti R.](https://github.com/kingstut) |
|
||||
| K-Diffusion Stable Diffusion | Run Stable Diffusion with any of [K-Diffusion's samplers](https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/sampling.py) | [Stable Diffusion with K Diffusion](#stable-diffusion-with-k-diffusion) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
|
||||
| Checkpoint Merger Pipeline | Diffusion Pipeline that enables merging of saved model checkpoints | [Checkpoint Merger Pipeline](#checkpoint-merger-pipeline) | - | [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
|
||||
Stable Diffusion v1.1-1.4 Comparison | Run all 4 model checkpoints for Stable Diffusion and compare their results together | [Stable Diffusion Comparison](#stable-diffusion-comparisons) | - | [Suvaditya Mukherjee](https://github.com/suvadityamuk) |
|
||||
MagicMix | Diffusion Pipeline for semantic mixing of an image and a text prompt | [MagicMix](#magic-mix) | - | [Partho Das](https://github.com/daspartho) |
|
||||
| Stable UnCLIP | Diffusion Pipeline for combining prior model (generate clip image embedding from text, UnCLIPPipeline `"kakaobrain/karlo-v1-alpha"`) and decoder pipeline (decode clip image embedding to image, StableDiffusionImageVariationPipeline `"lambdalabs/sd-image-variations-diffusers"` ). | [Stable UnCLIP](#stable-unclip) | - |[Ray Wang](https://wrong.wang) |
|
||||
| UnCLIP Text Interpolation Pipeline | Diffusion Pipeline that allows passing two prompts and produces images while interpolating between the text-embeddings of the two prompts | [UnCLIP Text Interpolation Pipeline](#unclip-text-interpolation-pipeline) | - | [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
|
||||
| UnCLIP Image Interpolation Pipeline | Diffusion Pipeline that allows passing two images/image_embeddings and produces images while interpolating between their image-embeddings | [UnCLIP Image Interpolation Pipeline](#unclip-image-interpolation-pipeline) | - | [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
|
||||
| DDIM Noise Comparative Analysis Pipeline | Investigating how the diffusion models learn visual concepts from each noise level (which is a contribution of [P2 weighting (CVPR 2022)](https://arxiv.org/abs/2204.00227)) | [DDIM Noise Comparative Analysis Pipeline](#ddim-noise-comparative-analysis-pipeline) | - |[Aengus (Duc-Anh)](https://github.com/aengusng8) |
|
||||
| CLIP Guided Img2Img Stable Diffusion Pipeline | Doing CLIP guidance for image to image generation with Stable Diffusion | [CLIP Guided Img2Img Stable Diffusion](#clip-guided-img2img-stable-diffusion) | - | [Nipun Jindal](https://github.com/nipunjindal/) |
|
||||
| TensorRT Stable Diffusion Pipeline | Accelerates the Stable Diffusion Text2Image Pipeline using TensorRT | [TensorRT Stable Diffusion Pipeline](#tensorrt-text2image-stable-diffusion-pipeline) | - |[Asfiya Baig](https://github.com/asfiyab-nvidia) |
|
||||
|
||||
| Example | Description | Code Example | Colab | Author |
|
||||
|:--------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------:|
|
||||
| CLIP Guided Stable Diffusion | Doing CLIP guidance for text to image generation with Stable Diffusion | [CLIP Guided Stable Diffusion](#clip-guided-stable-diffusion) | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/CLIP_Guided_Stable_diffusion_with_diffusers.ipynb) | [Suraj Patil](https://github.com/patil-suraj/) |
|
||||
| One Step U-Net (Dummy) | Example showcasing how to use Community Pipelines (see https://github.com/huggingface/diffusers/issues/841) | [One Step U-Net](#one-step-unet) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
|
||||
| Stable Diffusion Interpolation | Interpolate the latent space of Stable Diffusion between different prompts/seeds | [Stable Diffusion Interpolation](#stable-diffusion-interpolation) | - | [Nate Raw](https://github.com/nateraw/) |
|
||||
| Stable Diffusion Mega | **One** Stable Diffusion Pipeline with all functionalities of [Text2Image](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py), [Image2Image](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py) and [Inpainting](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py) | [Stable Diffusion Mega](#stable-diffusion-mega) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
|
||||
| Long Prompt Weighting Stable Diffusion | **One** Stable Diffusion Pipeline without token length limit, and supports parsing weighting in the prompt. | [Long Prompt Weighting Stable Diffusion](#long-prompt-weighting-stable-diffusion) | - | [SkyTNT](https://github.com/SkyTNT) |
|
||||
| Speech to Image | Using automatic-speech-recognition to transcribe text and Stable Diffusion to generate images | [Speech to Image](#speech-to-image) | - | [Mikail Duzenli](https://github.com/MikailINTech)
|
||||
| Wild Card Stable Diffusion | Stable Diffusion Pipeline that supports prompts that contain wildcard terms (indicated by surrounding double underscores), with values instantiated randomly from a corresponding txt file or a dictionary of possible values | [Wildcard Stable Diffusion](#wildcard-stable-diffusion) | - | [Shyam Sudhakaran](https://github.com/shyamsn97) |
|
||||
| [Composable Stable Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/) | Stable Diffusion Pipeline that supports prompts that contain "|" in prompts (as an AND condition) and weights (separated by "|" as well) to positively / negatively weight prompts. | [Composable Stable Diffusion](#composable-stable-diffusion) | - | [Mark Rich](https://github.com/MarkRich) |
|
||||
| Seed Resizing Stable Diffusion | Stable Diffusion Pipeline that supports resizing an image and retaining the concepts of the 512 by 512 generation. | [Seed Resizing](#seed-resizing) | - | [Mark Rich](https://github.com/MarkRich) |
|
||||
| Imagic Stable Diffusion | Stable Diffusion Pipeline that enables writing a text prompt to edit an existing image | [Imagic Stable Diffusion](#imagic-stable-diffusion) | - | [Mark Rich](https://github.com/MarkRich) |
|
||||
| Multilingual Stable Diffusion | Stable Diffusion Pipeline that supports prompts in 50 different languages. | [Multilingual Stable Diffusion](#multilingual-stable-diffusion-pipeline) | - | [Juan Carlos Piñeros](https://github.com/juancopi81) |
|
||||
| Image to Image Inpainting Stable Diffusion | Stable Diffusion Pipeline that enables the overlaying of two images and subsequent inpainting | [Image to Image Inpainting Stable Diffusion](#image-to-image-inpainting-stable-diffusion) | - | [Alex McKinney](https://github.com/vvvm23) |
|
||||
| Text Based Inpainting Stable Diffusion | Stable Diffusion Inpainting Pipeline that enables passing a text prompt to generate the mask for inpainting | [Text Based Inpainting Stable Diffusion](#image-to-image-inpainting-stable-diffusion) | - | [Dhruv Karan](https://github.com/unography) |
|
||||
| Bit Diffusion | Diffusion on discrete data | [Bit Diffusion](#bit-diffusion) | - | [Stuti R.](https://github.com/kingstut) |
|
||||
| K-Diffusion Stable Diffusion | Run Stable Diffusion with any of [K-Diffusion's samplers](https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/sampling.py) | [Stable Diffusion with K Diffusion](#stable-diffusion-with-k-diffusion) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
|
||||
| Checkpoint Merger Pipeline | Diffusion Pipeline that enables merging of saved model checkpoints | [Checkpoint Merger Pipeline](#checkpoint-merger-pipeline) | - | [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
|
||||
Stable Diffusion v1.1-1.4 Comparison | Run all 4 model checkpoints for Stable Diffusion and compare their results together | [Stable Diffusion Comparison](#stable-diffusion-comparisons) | - | [Suvaditya Mukherjee](https://github.com/suvadityamuk) |
|
||||
MagicMix | Diffusion Pipeline for semantic mixing of an image and a text prompt | [MagicMix](#magic-mix) | - | [Partho Das](https://github.com/daspartho) |
|
||||
| Stable UnCLIP | Diffusion Pipeline for combining prior model (generate clip image embedding from text, UnCLIPPipeline `"kakaobrain/karlo-v1-alpha"`) and decoder pipeline (decode clip image embedding to image, StableDiffusionImageVariationPipeline `"lambdalabs/sd-image-variations-diffusers"` ). | [Stable UnCLIP](#stable-unclip) | - | [Ray Wang](https://wrong.wang) |
|
||||
| UnCLIP Text Interpolation Pipeline | Diffusion Pipeline that allows passing two prompts and produces images while interpolating between the text-embeddings of the two prompts | [UnCLIP Text Interpolation Pipeline](#unclip-text-interpolation-pipeline) | - | [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
|
||||
| UnCLIP Image Interpolation Pipeline | Diffusion Pipeline that allows passing two images/image_embeddings and produces images while interpolating between their image-embeddings | [UnCLIP Image Interpolation Pipeline](#unclip-image-interpolation-pipeline) | - | [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
|
||||
| DDIM Noise Comparative Analysis Pipeline | Investigating how the diffusion models learn visual concepts from each noise level (which is a contribution of [P2 weighting (CVPR 2022)](https://arxiv.org/abs/2204.00227)) | [DDIM Noise Comparative Analysis Pipeline](#ddim-noise-comparative-analysis-pipeline) | - | [Aengus (Duc-Anh)](https://github.com/aengusng8) |
|
||||
| CLIP Guided Img2Img Stable Diffusion Pipeline | Doing CLIP guidance for image to image generation with Stable Diffusion | [CLIP Guided Img2Img Stable Diffusion](#clip-guided-img2img-stable-diffusion) | - | [Nipun Jindal](https://github.com/nipunjindal/) |
|
||||
| TensorRT Stable Diffusion Text to Image Pipeline | Accelerates the Stable Diffusion Text2Image Pipeline using TensorRT | [TensorRT Stable Diffusion Text to Image Pipeline](#tensorrt-text2image-stable-diffusion-pipeline) | - | [Asfiya Baig](https://github.com/asfiyab-nvidia) |
|
||||
| EDICT Image Editing Pipeline | Diffusion pipeline for text-guided image editing | [EDICT Image Editing Pipeline](#edict-image-editing-pipeline) | - | [Joqsan Azocar](https://github.com/Joqsan) |
|
||||
| Stable Diffusion RePaint | Stable Diffusion pipeline using [RePaint](https://arxiv.org/abs/2201.09865) for inpainting. | [Stable Diffusion RePaint](#stable-diffusion-repaint) | - | [Markus Pobitzer](https://github.com/Markus-Pobitzer) |
|
||||
| TensorRT Stable Diffusion Image to Image Pipeline | Accelerates the Stable Diffusion Image2Image Pipeline using TensorRT | [TensorRT Stable Diffusion Image to Image Pipeline](#tensorrt-image2image-stable-diffusion-pipeline) | - | [Asfiya Baig](https://github.com/asfiyab-nvidia) |
|
||||
| Stable Diffusion IPEX Pipeline | Accelerate Stable Diffusion inference pipeline with BF16/FP32 precision on Intel Xeon CPUs with [IPEX](https://github.com/intel/intel-extension-for-pytorch) | [Stable Diffusion on IPEX](#stable-diffusion-on-ipex) | - | [Yingjie Han](https://github.com/yingjie-han/) |
|
||||
| CLIP Guided Images Mixing Stable Diffusion Pipeline | Combine images using standard diffusion models. | [CLIP Guided Images Mixing Using Stable Diffusion](#clip-guided-images-mixing-with-stable-diffusion) | - | [Karachev Denis](https://github.com/TheDenk) |
|
||||
| TensorRT Stable Diffusion Inpainting Pipeline | Accelerates the Stable Diffusion Inpainting Pipeline using TensorRT | [TensorRT Stable Diffusion Inpainting Pipeline](#tensorrt-inpainting-stable-diffusion-pipeline) | - | [Asfiya Baig](https://github.com/asfiyab-nvidia) |
|
||||
|
||||
To load a custom pipeline, you only need to pass the `custom_pipeline` argument to `DiffusionPipeline`, set to the name of one of the files in `diffusers/examples/community`. Feel free to send a PR with your own pipelines; we will merge them quickly.
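For example, a minimal sketch of loading the One Step U-Net dummy pipeline listed above (the `google/ddpm-cifar10-32` checkpoint is only an illustrative choice):

```py
from diffusers import DiffusionPipeline

# "one_step_unet" refers to examples/community/one_step_unet.py
pipe = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32", custom_pipeline="one_step_unet"
)
pipe()
```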
|
||||
```py
|
||||
@@ -1161,3 +1166,510 @@ prompt = "a beautiful photograph of Mt. Fuji during cherry blossom"
|
||||
image = pipe(prompt).images[0]
|
||||
image.save('tensorrt_mt_fuji.png')
|
||||
```
|
||||
|
||||
### EDICT Image Editing Pipeline
|
||||
|
||||
This pipeline implements the text-guided image editing approach from the paper [EDICT: Exact Diffusion Inversion via Coupled Transformations](https://arxiv.org/abs/2211.12446). You have to pass:
|
||||
- (`PIL`) `image` you want to edit.
|
||||
- `base_prompt`: the text prompt describing the current image (before editing).
|
||||
- `target_prompt`: the text prompt describing the desired edits.
|
||||
|
||||
```python
|
||||
from diffusers import DiffusionPipeline, DDIMScheduler
|
||||
from transformers import CLIPTextModel
|
||||
import torch, PIL, requests
|
||||
from io import BytesIO
|
||||
from IPython.display import display
|
||||
|
||||
def center_crop_and_resize(im):
|
||||
|
||||
width, height = im.size
|
||||
d = min(width, height)
|
||||
left = (width - d) / 2
|
||||
upper = (height - d) / 2
|
||||
right = (width + d) / 2
|
||||
lower = (height + d) / 2
|
||||
|
||||
return im.crop((left, upper, right, lower)).resize((512, 512))
|
||||
|
||||
torch_dtype = torch.float16
|
||||
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
||||
|
||||
# scheduler and text_encoder param values as in the paper
|
||||
scheduler = DDIMScheduler(
|
||||
num_train_timesteps=1000,
|
||||
beta_start=0.00085,
|
||||
beta_end=0.012,
|
||||
beta_schedule="scaled_linear",
|
||||
set_alpha_to_one=False,
|
||||
clip_sample=False,
|
||||
)
|
||||
|
||||
text_encoder = CLIPTextModel.from_pretrained(
|
||||
pretrained_model_name_or_path="openai/clip-vit-large-patch14",
|
||||
torch_dtype=torch_dtype,
|
||||
)
|
||||
|
||||
# initialize pipeline
|
||||
pipeline = DiffusionPipeline.from_pretrained(
|
||||
pretrained_model_name_or_path="CompVis/stable-diffusion-v1-4",
|
||||
custom_pipeline="edict_pipeline",
|
||||
revision="fp16",
|
||||
scheduler=scheduler,
|
||||
text_encoder=text_encoder,
|
||||
leapfrog_steps=True,
|
||||
torch_dtype=torch_dtype,
|
||||
).to(device)
|
||||
|
||||
# download image
|
||||
image_url = "https://huggingface.co/datasets/Joqsan/images/resolve/main/imagenet_dog_1.jpeg"
|
||||
response = requests.get(image_url)
|
||||
image = PIL.Image.open(BytesIO(response.content))
|
||||
|
||||
# preprocess it
|
||||
cropped_image = center_crop_and_resize(image)
|
||||
|
||||
# define the prompts
|
||||
base_prompt = "A dog"
|
||||
target_prompt = "A golden retriever"
|
||||
|
||||
# run the pipeline
|
||||
result_image = pipeline(
|
||||
base_prompt=base_prompt,
|
||||
target_prompt=target_prompt,
|
||||
image=cropped_image,
|
||||
)
|
||||
|
||||
display(result_image)
|
||||
```
|
||||
|
||||
Init Image
|
||||
|
||||

|
||||
|
||||
Output Image
|
||||
|
||||

|
||||
|
||||
### Stable Diffusion RePaint
|
||||
|
||||
This pipeline uses the [RePaint](https://arxiv.org/abs/2201.09865) logic on the latent space of stable diffusion. It can
|
||||
be used similarly to other image inpainting pipelines but does not rely on a specific inpainting model. This means you can use
|
||||
models that are not specifically created for inpainting.
|
||||
|
||||
Make sure to use the `RePaintScheduler` as shown in the example below.
|
||||
|
||||
Disclaimer: The mask is transferred into latent space, which may lead to unexpected changes at the edges of the masked region.
|
||||
Inference is also considerably slower.
|
||||
|
||||
```py
|
||||
import PIL
|
||||
import requests
|
||||
import torch
|
||||
from io import BytesIO
|
||||
from diffusers import StableDiffusionPipeline, RePaintScheduler
|
||||
def download_image(url):
|
||||
response = requests.get(url)
|
||||
return PIL.Image.open(BytesIO(response.content)).convert("RGB")
|
||||
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
|
||||
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
|
||||
init_image = download_image(img_url).resize((512, 512))
|
||||
mask_image = download_image(mask_url).resize((512, 512))
|
||||
mask_image = PIL.ImageOps.invert(mask_image)
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, custom_pipeline="stable_diffusion_repaint",
|
||||
)
|
||||
pipe.scheduler = RePaintScheduler.from_config(pipe.scheduler.config)
|
||||
pipe = pipe.to("cuda")
|
||||
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
|
||||
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
|
||||
```
|
||||
|
||||
### TensorRT Image2Image Stable Diffusion Pipeline
|
||||
|
||||
The TensorRT Pipeline can be used to accelerate the Image2Image Stable Diffusion Inference run.
|
||||
|
||||
NOTE: The ONNX conversions and TensorRT engine build may take up to 30 minutes.
|
||||
|
||||
```python
|
||||
import requests
|
||||
from io import BytesIO
|
||||
from PIL import Image
|
||||
import torch
|
||||
from diffusers import DDIMScheduler
|
||||
from diffusers.pipelines.stable_diffusion import StableDiffusionImg2ImgPipeline
|
||||
|
||||
# Use the DDIMScheduler scheduler here instead
|
||||
scheduler = DDIMScheduler.from_pretrained("stabilityai/stable-diffusion-2-1",
|
||||
subfolder="scheduler")
|
||||
|
||||
|
||||
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-1",
|
||||
custom_pipeline="stable_diffusion_tensorrt_img2img",
|
||||
revision='fp16',
|
||||
torch_dtype=torch.float16,
|
||||
scheduler=scheduler,)
|
||||
|
||||
# re-use cached folder to save ONNX models and TensorRT Engines
|
||||
pipe.set_cached_folder("stabilityai/stable-diffusion-2-1", revision='fp16',)
|
||||
|
||||
pipe = pipe.to("cuda")
|
||||
|
||||
url = "https://pajoca.com/wp-content/uploads/2022/09/tekito-yamakawa-1.png"
|
||||
response = requests.get(url)
|
||||
input_image = Image.open(BytesIO(response.content)).convert("RGB")
|
||||
|
||||
prompt = "photorealistic new zealand hills"
|
||||
image = pipe(prompt, image=input_image, strength=0.75,).images[0]
|
||||
image.save('tensorrt_img2img_new_zealand_hills.png')
|
||||
```
|
||||
|
||||
### Stable Diffusion Reference
|
||||
|
||||
This pipeline uses the Reference Control. Refer to the [sd-webui-controlnet discussion: Reference-only Control](https://github.com/Mikubill/sd-webui-controlnet/discussions/1236) and the [sd-webui-controlnet discussion: Reference-adain Control](https://github.com/Mikubill/sd-webui-controlnet/discussions/1280).
|
||||
|
||||
Based on [this issue](https://github.com/huggingface/diffusers/issues/3566),
|
||||
- `EulerAncestralDiscreteScheduler` gives poor results.
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import DiffusionPipeline, UniPCMultistepScheduler
|
||||
from diffusers.utils import load_image
|
||||
|
||||
input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")
|
||||
|
||||
# load the community reference pipeline via the custom_pipeline mechanism
pipe = DiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
safety_checker=None,
|
||||
torch_dtype=torch.float16
|
||||
).to('cuda:0')
|
||||
|
||||
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
|
||||
|
||||
result_img = pipe(ref_image=input_image,
|
||||
prompt="1girl",
|
||||
num_inference_steps=20,
|
||||
reference_attn=True,
|
||||
reference_adain=True).images[0]
|
||||
```
|
||||
|
||||
Reference Image
|
||||
|
||||

|
||||
|
||||
Output Image of `reference_attn=True` and `reference_adain=False`
|
||||
|
||||

|
||||
|
||||
Output Image of `reference_attn=False` and `reference_adain=True`
|
||||
|
||||

|
||||
|
||||
Output Image of `reference_attn=True` and `reference_adain=True`
|
||||
|
||||

|
||||
|
||||
### Stable Diffusion ControlNet Reference
|
||||
|
||||
This pipeline uses the Reference Control with ControlNet. Refer to the [sd-webui-controlnet discussion: Reference-only Control](https://github.com/Mikubill/sd-webui-controlnet/discussions/1236) and the [sd-webui-controlnet discussion: Reference-adain Control](https://github.com/Mikubill/sd-webui-controlnet/discussions/1280).
|
||||
|
||||
Based on [this issue](https://github.com/huggingface/diffusers/issues/3566),
|
||||
- `EulerAncestralDiscreteScheduler` gives poor results.
|
||||
- `guess_mode=True` works well for ControlNet v1.1
|
||||
|
||||
```py
|
||||
import cv2
|
||||
import torch
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
from diffusers import ControlNetModel, DiffusionPipeline, UniPCMultistepScheduler
|
||||
from diffusers.utils import load_image
|
||||
|
||||
input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")
|
||||
|
||||
# get canny image
|
||||
image = cv2.Canny(np.array(input_image), 100, 200)
|
||||
image = image[:, :, None]
|
||||
image = np.concatenate([image, image, image], axis=2)
|
||||
canny_image = Image.fromarray(image)
|
||||
|
||||
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
|
||||
# load the community ControlNet reference pipeline via the custom_pipeline mechanism
pipe = DiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
controlnet=controlnet,
|
||||
safety_checker=None,
|
||||
torch_dtype=torch.float16
|
||||
).to('cuda:0')
|
||||
|
||||
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
|
||||
|
||||
result_img = pipe(ref_image=input_image,
|
||||
prompt="1girl",
|
||||
image=canny_image,
|
||||
num_inference_steps=20,
|
||||
reference_attn=True,
|
||||
reference_adain=True).images[0]
|
||||
```
|
||||
|
||||
Reference Image
|
||||
|
||||

|
||||
|
||||
Output Image
|
||||
|
||||

|
||||
|
||||
|
||||
### Stable Diffusion on IPEX
|
||||
|
||||
This diffusion pipeline aims to accelerate Stable Diffusion inference on Intel Xeon CPUs with BF16/FP32 precision using [IPEX](https://github.com/intel/intel-extension-for-pytorch).
|
||||
|
||||
To use this pipeline, you need to:
|
||||
1. Install [IPEX](https://github.com/intel/intel-extension-for-pytorch)
|
||||
|
||||
**Note:** Each PyTorch release has a corresponding IPEX release; the mapping is shown below. Installing PyTorch/IPEX 2.0 is recommended for the best performance.
|
||||
|
||||
|PyTorch Version|IPEX Version|
|
||||
|--|--|
|
||||
|[v2.0.\*](https://github.com/pytorch/pytorch/tree/v2.0.1 "v2.0.1")|[v2.0.\*](https://github.com/intel/intel-extension-for-pytorch/tree/v2.0.100+cpu)|
|
||||
|[v1.13.\*](https://github.com/pytorch/pytorch/tree/v1.13.0 "v1.13.0")|[v1.13.\*](https://github.com/intel/intel-extension-for-pytorch/tree/v1.13.100+cpu)|
|
||||
|
||||
You can simply use pip to install the latest version of IPEX.
|
||||
```bash
|
||||
python -m pip install intel_extension_for_pytorch
|
||||
```
|
||||
**Note:** To install a specific version, run the following command:
|
||||
```bash
|
||||
python -m pip install intel_extension_for_pytorch==<version_name> -f https://developer.intel.com/ipex-whl-stable-cpu
|
||||
```
|
||||
|
||||
2. After pipeline initialization, `prepare_for_ipex()` should be called to enable IPEX acceleration. Supported inference datatypes are Float32 and BFloat16.
|
||||
|
||||
**Note:** The generated image height/width passed to `prepare_for_ipex()` should be the same as the height/width used at pipeline inference time.
|
||||
```python
|
||||
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", custom_pipeline="stable_diffusion_ipex")
|
||||
# For Float32
|
||||
pipe.prepare_for_ipex(prompt, dtype=torch.float32, height=512, width=512) #value of image height/width should be consistent with the pipeline inference
|
||||
# For BFloat16
|
||||
pipe.prepare_for_ipex(prompt, dtype=torch.bfloat16, height=512, width=512) #value of image height/width should be consistent with the pipeline inference
|
||||
```
|
||||
|
||||
Then you can use the ipex pipeline in a similar way to the default stable diffusion pipeline.
|
||||
```python
|
||||
# For Float32
|
||||
image = pipe(prompt, num_inference_steps=20, height=512, width=512).images[0] #value of image height/width should be consistent with 'prepare_for_ipex()'
|
||||
# For BFloat16
|
||||
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
|
||||
image = pipe(prompt, num_inference_steps=20, height=512, width=512).images[0] #value of image height/width should be consistent with 'prepare_for_ipex()'
|
||||
```
|
||||
|
||||
The following code compares the performance of the original stable diffusion pipeline with the ipex-optimized pipeline.
|
||||
|
||||
```python
|
||||
import torch
|
||||
import intel_extension_for_pytorch as ipex
|
||||
from diffusers import DiffusionPipeline, StableDiffusionPipeline
|
||||
import time
|
||||
|
||||
prompt = "sailing ship in storm by Rembrandt"
|
||||
model_id = "runwayml/stable-diffusion-v1-5"
|
||||
# Helper function for time evaluation
|
||||
def elapsed_time(pipeline, nb_pass=3, num_inference_steps=20):
|
||||
# warmup
|
||||
for _ in range(2):
|
||||
images = pipeline(prompt, num_inference_steps=num_inference_steps, height=512, width=512).images
|
||||
#time evaluation
|
||||
start = time.time()
|
||||
for _ in range(nb_pass):
|
||||
pipeline(prompt, num_inference_steps=num_inference_steps, height=512, width=512)
|
||||
end = time.time()
|
||||
return (end - start) / nb_pass
|
||||
|
||||
############## bf16 inference performance ###############
|
||||
|
||||
# 1. IPEX Pipeline initialization
|
||||
pipe = DiffusionPipeline.from_pretrained(model_id, custom_pipeline="stable_diffusion_ipex")
|
||||
pipe.prepare_for_ipex(prompt, dtype=torch.bfloat16, height=512, width=512)
|
||||
|
||||
# 2. Original Pipeline initialization
|
||||
pipe2 = StableDiffusionPipeline.from_pretrained(model_id)
|
||||
|
||||
# 3. Compare performance between Original Pipeline and IPEX Pipeline
|
||||
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
|
||||
latency = elapsed_time(pipe)
|
||||
print("Latency of StableDiffusionIPEXPipeline--bf16", latency)
|
||||
latency = elapsed_time(pipe2)
|
||||
print("Latency of StableDiffusionPipeline--bf16",latency)
|
||||
|
||||
############## fp32 inference performance ###############
|
||||
|
||||
# 1. IPEX Pipeline initialization
|
||||
pipe3 = DiffusionPipeline.from_pretrained(model_id, custom_pipeline="stable_diffusion_ipex")
|
||||
pipe3.prepare_for_ipex(prompt, dtype=torch.float32, height=512, width=512)
|
||||
|
||||
# 2. Original Pipeline initialization
|
||||
pipe4 = StableDiffusionPipeline.from_pretrained(model_id)
|
||||
|
||||
# 3. Compare performance between Original Pipeline and IPEX Pipeline
|
||||
latency = elapsed_time(pipe3)
|
||||
print("Latency of StableDiffusionIPEXPipeline--fp32", latency)
|
||||
latency = elapsed_time(pipe4)
|
||||
print("Latency of StableDiffusionPipeline--fp32",latency)
|
||||
|
||||
```
|
||||
|
||||
### CLIP Guided Images Mixing With Stable Diffusion
|
||||
|
||||

|
||||
|
||||
The CLIP-guided Stable Diffusion images mixing pipeline allows you to combine two images using standard diffusion models.
|
||||
This approach uses an (optional) CoCa model to avoid writing an image description manually.
|
||||
[More code examples](https://github.com/TheDenk/images_mixing)
|
||||
|
||||
#### Example Images Mixing (with CoCa)
|
||||
```python
|
||||
import requests
|
||||
from io import BytesIO
|
||||
|
||||
import PIL
|
||||
import torch
|
||||
import open_clip
|
||||
from open_clip import SimpleTokenizer
|
||||
from diffusers import DiffusionPipeline
|
||||
from transformers import CLIPFeatureExtractor, CLIPModel
|
||||
|
||||
|
||||
def download_image(url):
|
||||
response = requests.get(url)
|
||||
return PIL.Image.open(BytesIO(response.content)).convert("RGB")
|
||||
|
||||
# Loading additional models
|
||||
feature_extractor = CLIPFeatureExtractor.from_pretrained(
|
||||
"laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
|
||||
)
|
||||
clip_model = CLIPModel.from_pretrained(
|
||||
"laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
|
||||
)
|
||||
coca_model = open_clip.create_model('coca_ViT-L-14', pretrained='laion2B-s13B-b90k').to('cuda')
|
||||
coca_model.dtype = torch.float16
|
||||
coca_transform = open_clip.image_transform(
|
||||
coca_model.visual.image_size,
|
||||
is_train = False,
|
||||
mean = getattr(coca_model.visual, 'image_mean', None),
|
||||
std = getattr(coca_model.visual, 'image_std', None),
|
||||
)
|
||||
coca_tokenizer = SimpleTokenizer()
|
||||
|
||||
# Pipeline creation
|
||||
mixing_pipeline = DiffusionPipeline.from_pretrained(
|
||||
"CompVis/stable-diffusion-v1-4",
|
||||
custom_pipeline="clip_guided_images_mixing_stable_diffusion",
|
||||
clip_model=clip_model,
|
||||
feature_extractor=feature_extractor,
|
||||
coca_model=coca_model,
|
||||
coca_tokenizer=coca_tokenizer,
|
||||
coca_transform=coca_transform,
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
mixing_pipeline.enable_attention_slicing()
|
||||
mixing_pipeline = mixing_pipeline.to("cuda")
|
||||
|
||||
# Run the pipeline
|
||||
generator = torch.Generator(device="cuda").manual_seed(17)
|
||||
|
||||
|
||||
|
||||
content_image = download_image("https://huggingface.co/datasets/TheDenk/images_mixing/resolve/main/boromir.jpg")
|
||||
style_image = download_image("https://huggingface.co/datasets/TheDenk/images_mixing/resolve/main/gigachad.jpg")
|
||||
|
||||
pipe_images = mixing_pipeline(
|
||||
num_inference_steps=50,
|
||||
content_image=content_image,
|
||||
style_image=style_image,
|
||||
noise_strength=0.65,
|
||||
slerp_latent_style_strength=0.9,
|
||||
slerp_prompt_style_strength=0.1,
|
||||
slerp_clip_image_style_strength=0.1,
|
||||
guidance_scale=9.0,
|
||||
batch_size=1,
|
||||
clip_guidance_scale=100,
|
||||
generator=generator,
|
||||
).images
|
||||
```
|
||||
|
||||

|
||||
|
||||
### Stable Diffusion Mixture
|
||||
|
||||
This pipeline uses the Mixture of Diffusers approach for tiled generation. Refer to the [Mixture of Diffusers](https://arxiv.org/abs/2302.02412) paper for more details.
|
||||
|
||||
```python
|
||||
from diffusers import LMSDiscreteScheduler, DiffusionPipeline
|
||||
|
||||
# Create scheduler and model (similar to StableDiffusionPipeline)
|
||||
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
|
||||
pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler, custom_pipeline="mixture_tiling")
|
||||
pipeline.to("cuda")
|
||||
|
||||
# Mixture of Diffusers generation
|
||||
image = pipeline(
|
||||
prompt=[[
|
||||
"A charming house in the countryside, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
|
||||
"A dirt road in the countryside crossing pastures, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
|
||||
"An old and rusty giant robot lying on a dirt road, by jakub rozalski, dark sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece"
|
||||
]],
|
||||
tile_height=640,
|
||||
tile_width=640,
|
||||
tile_row_overlap=0,
|
||||
tile_col_overlap=256,
|
||||
guidance_scale=8,
|
||||
seed=7178915308,
|
||||
num_inference_steps=50,
|
||||
)["images"][0]
|
||||
```
|
||||

|
||||
|
||||
### TensorRT Inpainting Stable Diffusion Pipeline
|
||||
|
||||
The TensorRT Pipeline can be used to accelerate the Inpainting Stable Diffusion Inference run.
|
||||
|
||||
NOTE: The ONNX conversions and TensorRT engine build may take up to 30 minutes.
|
||||
|
||||
```python
|
||||
import requests
|
||||
from io import BytesIO
|
||||
from PIL import Image
|
||||
import torch
|
||||
from diffusers import PNDMScheduler
|
||||
from diffusers.pipelines.stable_diffusion import StableDiffusionImg2ImgPipeline
|
||||
|
||||
# Use the PNDMScheduler scheduler here instead
|
||||
scheduler = PNDMScheduler.from_pretrained("stabilityai/stable-diffusion-2-inpainting", subfolder="scheduler")
|
||||
|
||||
|
||||
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-inpainting",
|
||||
custom_pipeline="stable_diffusion_tensorrt_inpaint",
|
||||
revision='fp16',
|
||||
torch_dtype=torch.float16,
|
||||
scheduler=scheduler,
|
||||
)
|
||||
|
||||
# re-use cached folder to save ONNX models and TensorRT Engines
|
||||
pipe.set_cached_folder("stabilityai/stable-diffusion-2-inpainting", revision='fp16',)
|
||||
|
||||
pipe = pipe.to("cuda")
|
||||
|
||||
url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
|
||||
response = requests.get(url)
|
||||
input_image = Image.open(BytesIO(response.content)).convert("RGB")
|
||||
|
||||
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
|
||||
response = requests.get(mask_url)
|
||||
mask_image = Image.open(BytesIO(response.content)).convert("RGB")
|
||||
|
||||
prompt = "a mecha robot sitting on a bench"
|
||||
image = pipe(prompt, image=input_image, mask_image=mask_image, strength=0.75,).images[0]
|
||||
image.save('tensorrt_inpaint_mecha_robot.png')
|
||||
```
|
||||
examples/community/clip_guided_images_mixing_stable_diffusion.py (456, Normal file)
@@ -0,0 +1,456 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import inspect
|
||||
from typing import Optional, Union
|
||||
|
||||
import numpy as np
|
||||
import PIL
|
||||
import torch
|
||||
from torch.nn import functional as F
|
||||
from torchvision import transforms
|
||||
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
|
||||
|
||||
from diffusers import (
|
||||
AutoencoderKL,
|
||||
DDIMScheduler,
|
||||
DiffusionPipeline,
|
||||
DPMSolverMultistepScheduler,
|
||||
LMSDiscreteScheduler,
|
||||
PNDMScheduler,
|
||||
UNet2DConditionModel,
|
||||
)
|
||||
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
|
||||
from diffusers.utils import (
|
||||
PIL_INTERPOLATION,
|
||||
randn_tensor,
|
||||
)
|
||||
|
||||
|
||||
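# Convert a PIL image (or list of images) to an NCHW torch tensor resized to (w, h) and scaled to [-1, 1]; tensors are passed through unchanged.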
def preprocess(image, w, h):
|
||||
if isinstance(image, torch.Tensor):
|
||||
return image
|
||||
elif isinstance(image, PIL.Image.Image):
|
||||
image = [image]
|
||||
|
||||
if isinstance(image[0], PIL.Image.Image):
|
||||
image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
|
||||
image = np.concatenate(image, axis=0)
|
||||
image = np.array(image).astype(np.float32) / 255.0
|
||||
image = image.transpose(0, 3, 1, 2)
|
||||
image = 2.0 * image - 1.0
|
||||
image = torch.from_numpy(image)
|
||||
elif isinstance(image[0], torch.Tensor):
|
||||
image = torch.cat(image, dim=0)
|
||||
return image
|
||||
|
||||
|
||||
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    # Spherical linear interpolation; falls back to plain lerp when the vectors are nearly collinear.
    # Track whether the inputs were torch tensors so the result can be moved back to the original device.
    inputs_are_torch = False
|
||||
if not isinstance(v0, np.ndarray):
|
||||
inputs_are_torch = True
|
||||
input_device = v0.device
|
||||
v0 = v0.cpu().numpy()
|
||||
v1 = v1.cpu().numpy()
|
||||
|
||||
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
||||
if np.abs(dot) > DOT_THRESHOLD:
|
||||
v2 = (1 - t) * v0 + t * v1
|
||||
else:
|
||||
theta_0 = np.arccos(dot)
|
||||
sin_theta_0 = np.sin(theta_0)
|
||||
theta_t = theta_0 * t
|
||||
sin_theta_t = np.sin(theta_t)
|
||||
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
||||
s1 = sin_theta_t / sin_theta_0
|
||||
v2 = s0 * v0 + s1 * v1
|
||||
|
||||
if inputs_are_torch:
|
||||
v2 = torch.from_numpy(v2).to(input_device)
|
||||
|
||||
return v2
|
||||
|
||||
|
||||
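# Squared spherical distance (up to a constant factor) between L2-normalized embeddings, used as the CLIP guidance loss.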
def spherical_dist_loss(x, y):
|
||||
x = F.normalize(x, dim=-1)
|
||||
y = F.normalize(y, dim=-1)
|
||||
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
|
||||
|
||||
|
||||
def set_requires_grad(model, value):
|
||||
for param in model.parameters():
|
||||
param.requires_grad = value
|
||||
|
||||
|
||||
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
|
||||
def __init__(
|
||||
self,
|
||||
vae: AutoencoderKL,
|
||||
text_encoder: CLIPTextModel,
|
||||
clip_model: CLIPModel,
|
||||
tokenizer: CLIPTokenizer,
|
||||
unet: UNet2DConditionModel,
|
||||
scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
|
||||
feature_extractor: CLIPFeatureExtractor,
|
||||
coca_model=None,
|
||||
coca_tokenizer=None,
|
||||
coca_transform=None,
|
||||
):
|
||||
super().__init__()
|
||||
self.register_modules(
|
||||
vae=vae,
|
||||
text_encoder=text_encoder,
|
||||
clip_model=clip_model,
|
||||
tokenizer=tokenizer,
|
||||
unet=unet,
|
||||
scheduler=scheduler,
|
||||
feature_extractor=feature_extractor,
|
||||
coca_model=coca_model,
|
||||
coca_tokenizer=coca_tokenizer,
|
||||
coca_transform=coca_transform,
|
||||
)
|
||||
self.feature_extractor_size = (
|
||||
feature_extractor.size
|
||||
if isinstance(feature_extractor.size, int)
|
||||
else feature_extractor.size["shortest_edge"]
|
||||
)
|
||||
self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
|
||||
set_requires_grad(self.text_encoder, False)
|
||||
set_requires_grad(self.clip_model, False)
|
||||
|
||||
def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
|
||||
if slice_size == "auto":
|
||||
# half the attention head size is usually a good trade-off between
|
||||
# speed and memory
|
||||
slice_size = self.unet.config.attention_head_dim // 2
|
||||
self.unet.set_attention_slice(slice_size)
|
||||
|
||||
def disable_attention_slicing(self):
|
||||
self.enable_attention_slicing(None)
|
||||
|
||||
def freeze_vae(self):
|
||||
set_requires_grad(self.vae, False)
|
||||
|
||||
def unfreeze_vae(self):
|
||||
set_requires_grad(self.vae, True)
|
||||
|
||||
def freeze_unet(self):
|
||||
set_requires_grad(self.unet, False)
|
||||
|
||||
def unfreeze_unet(self):
|
||||
set_requires_grad(self.unet, True)
|
||||
|
||||
def get_timesteps(self, num_inference_steps, strength, device):
|
||||
# get the original timestep using init_timestep
|
||||
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
||||
|
||||
t_start = max(num_inference_steps - init_timestep, 0)
|
||||
timesteps = self.scheduler.timesteps[t_start:]
|
||||
|
||||
return timesteps, num_inference_steps - t_start
|
||||
|
||||
def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
|
||||
if not isinstance(image, torch.Tensor):
|
||||
raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
|
||||
|
||||
image = image.to(device=device, dtype=dtype)
|
||||
|
||||
if isinstance(generator, list):
|
||||
init_latents = [
|
||||
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
|
||||
]
|
||||
init_latents = torch.cat(init_latents, dim=0)
|
||||
else:
|
||||
init_latents = self.vae.encode(image).latent_dist.sample(generator)
|
||||
|
||||
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
|
||||
init_latents = 0.18215 * init_latents
|
||||
init_latents = init_latents.repeat_interleave(batch_size, dim=0)
|
||||
|
||||
noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
|
||||
|
||||
# get latents
|
||||
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
|
||||
latents = init_latents
|
||||
|
||||
return latents
|
||||
|
||||
def get_image_description(self, image):
|
||||
transformed_image = self.coca_transform(image).unsqueeze(0)
|
||||
with torch.no_grad(), torch.cuda.amp.autocast():
|
||||
generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
|
||||
generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
|
||||
return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
|
||||
|
||||
def get_clip_image_embeddings(self, image, batch_size):
|
||||
clip_image_input = self.feature_extractor.preprocess(image)
|
||||
clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
|
||||
image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
|
||||
image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
|
||||
image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
|
||||
return image_embeddings_clip
|
||||
|
||||
@torch.enable_grad()
|
||||
def cond_fn(
|
||||
self,
|
||||
latents,
|
||||
timestep,
|
||||
index,
|
||||
text_embeddings,
|
||||
noise_pred_original,
|
||||
original_image_embeddings_clip,
|
||||
clip_guidance_scale,
|
||||
):
|
||||
latents = latents.detach().requires_grad_()
|
||||
|
||||
latent_model_input = self.scheduler.scale_model_input(latents, timestep)
|
||||
|
||||
# predict the noise residual
|
||||
noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
|
||||
|
||||
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
|
||||
alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
|
||||
beta_prod_t = 1 - alpha_prod_t
|
||||
# compute predicted original sample from predicted noise also called
|
||||
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
|
||||
pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
|
||||
|
||||
fac = torch.sqrt(beta_prod_t)
|
||||
sample = pred_original_sample * (fac) + latents * (1 - fac)
|
||||
elif isinstance(self.scheduler, LMSDiscreteScheduler):
|
||||
sigma = self.scheduler.sigmas[index]
|
||||
sample = latents - sigma * noise_pred
|
||||
else:
|
||||
raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
|
||||
|
||||
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
|
||||
sample = 1 / 0.18215 * sample
|
||||
image = self.vae.decode(sample).sample
|
||||
image = (image / 2 + 0.5).clamp(0, 1)
|
||||
|
||||
image = transforms.Resize(self.feature_extractor_size)(image)
|
||||
image = self.normalize(image).to(latents.dtype)
|
||||
|
||||
image_embeddings_clip = self.clip_model.get_image_features(image)
|
||||
image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
|
||||
|
||||
loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
|
||||
|
||||
grads = -torch.autograd.grad(loss, latents)[0]
|
||||
|
||||
if isinstance(self.scheduler, LMSDiscreteScheduler):
|
||||
latents = latents.detach() + grads * (sigma**2)
|
||||
noise_pred = noise_pred_original
|
||||
else:
|
||||
noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
|
||||
return noise_pred, latents
|
||||
|
||||
@torch.no_grad()
|
||||
def __call__(
|
||||
self,
|
||||
style_image: Union[torch.FloatTensor, PIL.Image.Image],
|
||||
content_image: Union[torch.FloatTensor, PIL.Image.Image],
|
||||
style_prompt: Optional[str] = None,
|
||||
content_prompt: Optional[str] = None,
|
||||
height: Optional[int] = 512,
|
||||
width: Optional[int] = 512,
|
||||
noise_strength: float = 0.6,
|
||||
num_inference_steps: Optional[int] = 50,
|
||||
guidance_scale: Optional[float] = 7.5,
|
||||
batch_size: Optional[int] = 1,
|
||||
eta: float = 0.0,
|
||||
clip_guidance_scale: Optional[float] = 100,
|
||||
generator: Optional[torch.Generator] = None,
|
||||
output_type: Optional[str] = "pil",
|
||||
return_dict: bool = True,
|
||||
slerp_latent_style_strength: float = 0.8,
|
||||
slerp_prompt_style_strength: float = 0.1,
|
||||
slerp_clip_image_style_strength: float = 0.1,
|
||||
):
|
||||
if isinstance(generator, list) and len(generator) != batch_size:
|
||||
raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")
|
||||
|
||||
if height % 8 != 0 or width % 8 != 0:
|
||||
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
||||
|
||||
if isinstance(generator, torch.Generator) and batch_size > 1:
|
||||
generator = [generator] + [None] * (batch_size - 1)
|
||||
|
||||
coca_is_none = [
|
||||
("model", self.coca_model is None),
|
||||
("tokenizer", self.coca_tokenizer is None),
|
||||
("transform", self.coca_transform is None),
|
||||
]
|
||||
coca_is_none = [x[0] for x in coca_is_none if x[1]]
|
||||
coca_is_none_str = ", ".join(coca_is_none)
|
||||
# generate prompts with coca model if prompt is None
|
||||
if content_prompt is None:
|
||||
if len(coca_is_none):
|
||||
raise ValueError(
|
||||
f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
|
||||
f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
|
||||
)
|
||||
content_prompt = self.get_image_description(content_image)
|
||||
if style_prompt is None:
|
||||
if len(coca_is_none):
|
||||
raise ValueError(
|
||||
f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
|
||||
f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
|
||||
)
|
||||
style_prompt = self.get_image_description(style_image)
|
||||
|
||||
# get prompt text embeddings for content and style
|
||||
content_text_input = self.tokenizer(
|
||||
content_prompt,
|
||||
padding="max_length",
|
||||
max_length=self.tokenizer.model_max_length,
|
||||
truncation=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
|
||||
|
||||
style_text_input = self.tokenizer(
|
||||
style_prompt,
|
||||
padding="max_length",
|
||||
max_length=self.tokenizer.model_max_length,
|
||||
truncation=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
|
||||
|
||||
text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)
|
||||
|
||||
# duplicate text embeddings for each generation per prompt
|
||||
text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
|
||||
|
||||
# set timesteps
|
||||
accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
|
||||
extra_set_kwargs = {}
|
||||
if accepts_offset:
|
||||
extra_set_kwargs["offset"] = 1
|
||||
|
||||
self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
|
||||
# Some schedulers like PNDM have timesteps as arrays
|
||||
# It's more optimized to move all timesteps to correct device beforehand
|
||||
self.scheduler.timesteps = self.scheduler.timesteps.to(self.device)
|
||||
|
||||
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
|
||||
latent_timestep = timesteps[:1].repeat(batch_size)
|
||||
|
||||
# Preprocess image
|
||||
preprocessed_content_image = preprocess(content_image, width, height)
|
||||
content_latents = self.prepare_latents(
|
||||
preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
|
||||
)
|
||||
|
||||
preprocessed_style_image = preprocess(style_image, width, height)
|
||||
style_latents = self.prepare_latents(
|
||||
preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
|
||||
)
|
||||
|
||||
latents = slerp(slerp_latent_style_strength, content_latents, style_latents)
|
||||
|
||||
if clip_guidance_scale > 0:
|
||||
content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
|
||||
style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
|
||||
clip_image_embeddings = slerp(
|
||||
slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
|
||||
)
|
||||
|
||||
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
||||
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
||||
# corresponds to doing no classifier free guidance.
|
||||
do_classifier_free_guidance = guidance_scale > 1.0
|
||||
# get unconditional embeddings for classifier free guidance
|
||||
if do_classifier_free_guidance:
|
||||
max_length = content_text_input.input_ids.shape[-1]
|
||||
uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
|
||||
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
|
||||
# duplicate unconditional embeddings for each generation per prompt
|
||||
uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
|
||||
|
||||
# For classifier free guidance, we need to do two forward passes.
|
||||
# Here we concatenate the unconditional and text embeddings into a single batch
|
||||
# to avoid doing two forward passes
|
||||
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
|
||||
|
||||
# get the initial random noise unless the user supplied it
|
||||
|
||||
# Unlike in other pipelines, latents need to be generated in the target device
|
||||
# for 1-to-1 results reproducibility with the CompVis implementation.
|
||||
# However this currently doesn't work in `mps`.
|
||||
latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
|
||||
latents_dtype = text_embeddings.dtype
|
||||
if latents is None:
|
||||
if self.device.type == "mps":
|
||||
# randn does not work reproducibly on mps
|
||||
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
|
||||
self.device
|
||||
)
|
||||
else:
|
||||
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
|
||||
else:
|
||||
if latents.shape != latents_shape:
|
||||
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
|
||||
latents = latents.to(self.device)
|
||||
|
||||
# scale the initial noise by the standard deviation required by the scheduler
|
||||
latents = latents * self.scheduler.init_noise_sigma
|
||||
|
||||
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
||||
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
||||
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
||||
# and should be between [0, 1]
|
||||
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
||||
extra_step_kwargs = {}
|
||||
if accepts_eta:
|
||||
extra_step_kwargs["eta"] = eta
|
||||
|
||||
# check if the scheduler accepts generator
|
||||
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
||||
if accepts_generator:
|
||||
extra_step_kwargs["generator"] = generator
|
||||
|
||||
with self.progress_bar(total=num_inference_steps):
|
||||
for i, t in enumerate(timesteps):
|
||||
# expand the latents if we are doing classifier free guidance
|
||||
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
||||
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
||||
|
||||
# predict the noise residual
|
||||
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
|
||||
|
||||
# perform classifier free guidance
|
||||
if do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
||||
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
||||
|
||||
# perform clip guidance
|
||||
if clip_guidance_scale > 0:
|
||||
text_embeddings_for_guidance = (
|
||||
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
|
||||
)
|
||||
noise_pred, latents = self.cond_fn(
|
||||
latents,
|
||||
t,
|
||||
i,
|
||||
text_embeddings_for_guidance,
|
||||
noise_pred,
|
||||
clip_image_embeddings,
|
||||
clip_guidance_scale,
|
||||
)
|
||||
|
||||
# compute the previous noisy sample x_t -> x_t-1
|
||||
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
||||
|
||||
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
|
||||
latents = 1 / 0.18215 * latents
|
||||
image = self.vae.decode(latents).sample
|
||||
|
||||
image = (image / 2 + 0.5).clamp(0, 1)
|
||||
image = image.cpu().permute(0, 2, 3, 1).numpy()
|
||||
|
||||
if output_type == "pil":
|
||||
image = self.numpy_to_pil(image)
|
||||
|
||||
if not return_dict:
|
||||
return (image, None)
|
||||
|
||||
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
|
||||
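A minimal usage sketch for the images-mixing pipeline above, loaded through `custom_pipeline`; the CLIP checkpoint, image paths, seed, and prompts are illustrative assumptions rather than part of the diff, and CoCa captioning is skipped by passing the prompts explicitly:

```py
import torch
from PIL import Image
from transformers import CLIPFeatureExtractor, CLIPModel

from diffusers import DiffusionPipeline

# Assumed CLIP checkpoint used for both the guidance model and the feature extractor.
clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_model_id)
clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_images_mixing_stable_diffusion",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
).to("cuda")

# Placeholder input images; the CoCa components are left unset, so prompts are given explicitly.
content_image = Image.open("content.png").convert("RGB")
style_image = Image.open("style.png").convert("RGB")

image = pipe(
    content_image=content_image,
    style_image=style_image,
    content_prompt="a mountain landscape",
    style_prompt="an oil painting",
    num_inference_steps=50,
    generator=torch.Generator(device="cuda").manual_seed(17),
).images[0]
image.save("mixed.png")
```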
264  examples/community/edict_pipeline.py  Normal file
@@ -0,0 +1,264 @@
|
||||
from typing import Optional
|
||||
|
||||
import torch
|
||||
from PIL import Image
|
||||
from tqdm.auto import tqdm
|
||||
from transformers import CLIPTextModel, CLIPTokenizer
|
||||
|
||||
from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, UNet2DConditionModel
|
||||
from diffusers.image_processor import VaeImageProcessor
|
||||
from diffusers.utils import (
|
||||
deprecate,
|
||||
)
|
||||
|
||||
|
||||
class EDICTPipeline(DiffusionPipeline):
|
||||
def __init__(
|
||||
self,
|
||||
vae: AutoencoderKL,
|
||||
text_encoder: CLIPTextModel,
|
||||
tokenizer: CLIPTokenizer,
|
||||
unet: UNet2DConditionModel,
|
||||
scheduler: DDIMScheduler,
|
||||
mixing_coeff: float = 0.93,
|
||||
leapfrog_steps: bool = True,
|
||||
):
|
||||
self.mixing_coeff = mixing_coeff
|
||||
self.leapfrog_steps = leapfrog_steps
|
||||
|
||||
super().__init__()
|
||||
self.register_modules(
|
||||
vae=vae,
|
||||
text_encoder=text_encoder,
|
||||
tokenizer=tokenizer,
|
||||
unet=unet,
|
||||
scheduler=scheduler,
|
||||
)
|
||||
|
||||
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
||||
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
||||
|
||||
def _encode_prompt(
|
||||
self, prompt: str, negative_prompt: Optional[str] = None, do_classifier_free_guidance: bool = False
|
||||
):
|
||||
text_inputs = self.tokenizer(
|
||||
prompt,
|
||||
padding="max_length",
|
||||
max_length=self.tokenizer.model_max_length,
|
||||
truncation=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
prompt_embeds = self.text_encoder(text_inputs.input_ids.to(self.device)).last_hidden_state
|
||||
|
||||
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=self.device)
|
||||
|
||||
if do_classifier_free_guidance:
|
||||
uncond_tokens = "" if negative_prompt is None else negative_prompt
|
||||
|
||||
uncond_input = self.tokenizer(
|
||||
uncond_tokens,
|
||||
padding="max_length",
|
||||
max_length=self.tokenizer.model_max_length,
|
||||
truncation=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device)).last_hidden_state
|
||||
|
||||
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
||||
|
||||
return prompt_embeds
|
||||
|
||||
def denoise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor):
|
||||
x = self.mixing_coeff * x + (1 - self.mixing_coeff) * y
|
||||
y = self.mixing_coeff * y + (1 - self.mixing_coeff) * x
|
||||
|
||||
return [x, y]
|
||||
|
||||
def noise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor):
|
||||
y = (y - (1 - self.mixing_coeff) * x) / self.mixing_coeff
|
||||
x = (x - (1 - self.mixing_coeff) * y) / self.mixing_coeff
|
||||
|
||||
return [x, y]
|
||||
|
||||
def _get_alpha_and_beta(self, t: torch.Tensor):
|
||||
# self.scheduler.alphas_cumprod always lives on the CPU, so index it with a plain int
|
||||
t = int(t)
|
||||
|
||||
alpha_prod = self.scheduler.alphas_cumprod[t] if t >= 0 else self.scheduler.final_alpha_cumprod
|
||||
|
||||
return alpha_prod, 1 - alpha_prod
|
||||
|
||||
def noise_step(
|
||||
self,
|
||||
base: torch.Tensor,
|
||||
model_input: torch.Tensor,
|
||||
model_output: torch.Tensor,
|
||||
timestep: torch.Tensor,
|
||||
):
|
||||
prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps
|
||||
|
||||
alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep)
|
||||
alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep)
|
||||
|
||||
a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5
|
||||
b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5
|
||||
|
||||
next_model_input = (base - b_t * model_output) / a_t
|
||||
|
||||
return model_input, next_model_input.to(base.dtype)
|
||||
|
||||
def denoise_step(
|
||||
self,
|
||||
base: torch.Tensor,
|
||||
model_input: torch.Tensor,
|
||||
model_output: torch.Tensor,
|
||||
timestep: torch.Tensor,
|
||||
):
|
||||
prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps
|
||||
|
||||
alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep)
|
||||
alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep)
|
||||
|
||||
a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5
|
||||
b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5
|
||||
next_model_input = a_t * base + b_t * model_output
|
||||
|
||||
return model_input, next_model_input.to(base.dtype)
|
||||
|
||||
@torch.no_grad()
|
||||
def decode_latents(self, latents: torch.Tensor):
|
||||
latents = 1 / self.vae.config.scaling_factor * latents
|
||||
image = self.vae.decode(latents).sample
|
||||
image = (image / 2 + 0.5).clamp(0, 1)
|
||||
return image
|
||||
|
||||
@torch.no_grad()
|
||||
def prepare_latents(
|
||||
self,
|
||||
image: Image.Image,
|
||||
text_embeds: torch.Tensor,
|
||||
timesteps: torch.Tensor,
|
||||
guidance_scale: float,
|
||||
generator: Optional[torch.Generator] = None,
|
||||
):
|
||||
do_classifier_free_guidance = guidance_scale > 1.0
|
||||
|
||||
image = image.to(device=self.device, dtype=text_embeds.dtype)
|
||||
latent = self.vae.encode(image).latent_dist.sample(generator)
|
||||
|
||||
latent = self.vae.config.scaling_factor * latent
|
||||
|
||||
coupled_latents = [latent.clone(), latent.clone()]
|
||||
|
||||
for i, t in tqdm(enumerate(timesteps), total=len(timesteps)):
|
||||
coupled_latents = self.noise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1])
|
||||
|
||||
# j - model_input index, k - base index
|
||||
for j in range(2):
|
||||
k = j ^ 1
|
||||
|
||||
if self.leapfrog_steps:
|
||||
if i % 2 == 0:
|
||||
k, j = j, k
|
||||
|
||||
model_input = coupled_latents[j]
|
||||
base = coupled_latents[k]
|
||||
|
||||
latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
|
||||
|
||||
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeds).sample
|
||||
|
||||
if do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
||||
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
||||
|
||||
base, model_input = self.noise_step(
|
||||
base=base,
|
||||
model_input=model_input,
|
||||
model_output=noise_pred,
|
||||
timestep=t,
|
||||
)
|
||||
|
||||
coupled_latents[k] = model_input
|
||||
|
||||
return coupled_latents
|
||||
|
||||
@torch.no_grad()
|
||||
def __call__(
|
||||
self,
|
||||
base_prompt: str,
|
||||
target_prompt: str,
|
||||
image: Image.Image,
|
||||
guidance_scale: float = 3.0,
|
||||
num_inference_steps: int = 50,
|
||||
strength: float = 0.8,
|
||||
negative_prompt: Optional[str] = None,
|
||||
generator: Optional[torch.Generator] = None,
|
||||
output_type: Optional[str] = "pil",
|
||||
):
|
||||
do_classifier_free_guidance = guidance_scale > 1.0
|
||||
|
||||
image = self.image_processor.preprocess(image)
|
||||
|
||||
base_embeds = self._encode_prompt(base_prompt, negative_prompt, do_classifier_free_guidance)
|
||||
target_embeds = self._encode_prompt(target_prompt, negative_prompt, do_classifier_free_guidance)
|
||||
|
||||
self.scheduler.set_timesteps(num_inference_steps, self.device)
|
||||
|
||||
t_limit = num_inference_steps - int(num_inference_steps * strength)
|
||||
fwd_timesteps = self.scheduler.timesteps[t_limit:]
|
||||
bwd_timesteps = fwd_timesteps.flip(0)
|
||||
|
||||
coupled_latents = self.prepare_latents(image, base_embeds, bwd_timesteps, guidance_scale, generator)
|
||||
|
||||
for i, t in tqdm(enumerate(fwd_timesteps), total=len(fwd_timesteps)):
|
||||
# j - model_input index, k - base index
|
||||
for k in range(2):
|
||||
j = k ^ 1
|
||||
|
||||
if self.leapfrog_steps:
|
||||
if i % 2 == 1:
|
||||
k, j = j, k
|
||||
|
||||
model_input = coupled_latents[j]
|
||||
base = coupled_latents[k]
|
||||
|
||||
latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
|
||||
|
||||
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=target_embeds).sample
|
||||
|
||||
if do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
||||
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
||||
|
||||
base, model_input = self.denoise_step(
|
||||
base=base,
|
||||
model_input=model_input,
|
||||
model_output=noise_pred,
|
||||
timestep=t,
|
||||
)
|
||||
|
||||
coupled_latents[k] = model_input
|
||||
|
||||
coupled_latents = self.denoise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1])
|
||||
|
||||
# either one is fine
|
||||
final_latent = coupled_latents[0]
|
||||
|
||||
if output_type not in ["latent", "pt", "np", "pil"]:
|
||||
deprecation_message = (
|
||||
f"the output_type {output_type} is outdated. Please make sure to set it to one of these instead: "
|
||||
"`pil`, `np`, `pt`, `latent`"
|
||||
)
|
||||
deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
|
||||
output_type = "np"
|
||||
|
||||
if output_type == "latent":
|
||||
image = final_latent
|
||||
else:
|
||||
image = self.decode_latents(final_latent)
|
||||
image = self.image_processor.postprocess(image, output_type=output_type)
|
||||
|
||||
return image
|
||||
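A minimal usage sketch for the EDICT pipeline above; the checkpoint, scheduler settings, image path, and prompts are illustrative assumptions, not part of the diff:

```py
import torch
from PIL import Image

from diffusers import DDIMScheduler, DiffusionPipeline

# The pipeline expects a DDIMScheduler (it reads `final_alpha_cumprod` for the t < 0 case
# in `_get_alpha_and_beta`); `set_alpha_to_one=False` is an assumed, not required, setting.
scheduler = DDIMScheduler.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="scheduler", set_alpha_to_one=False
)

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="edict_pipeline",
    scheduler=scheduler,
).to("cuda")

# Placeholder input image; `base_prompt` describes it, `target_prompt` describes the edit.
image = Image.open("dog.png").convert("RGB").resize((512, 512))

edited = pipe(
    base_prompt="a photo of a dog",
    target_prompt="a photo of a cat",
    image=image,
    guidance_scale=3.0,
    num_inference_steps=50,
    strength=0.8,
)[0]
edited.save("edict_edit.png")
```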
File diff suppressed because it is too large
401  examples/community/mixture.py  Normal file
@@ -0,0 +1,401 @@
|
||||
import inspect
|
||||
from copy import deepcopy
|
||||
from enum import Enum
|
||||
from typing import List, Optional, Tuple, Union
|
||||
|
||||
import torch
|
||||
from ligo.segments import segment
|
||||
from tqdm.auto import tqdm
|
||||
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
|
||||
|
||||
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
||||
from diffusers.pipeline_utils import DiffusionPipeline
|
||||
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
|
||||
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
|
||||
from diffusers.utils import logging
|
||||
|
||||
|
||||
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
||||
|
||||
EXAMPLE_DOC_STRING = """
|
||||
Examples:
|
||||
```py
|
||||
>>> from diffusers import LMSDiscreteScheduler
|
||||
>>> from mixdiff import StableDiffusionTilingPipeline
|
||||
|
||||
>>> scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
|
||||
>>> pipeline = StableDiffusionTilingPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
|
||||
>>> pipeline.to("cuda:0")
|
||||
|
||||
>>> image = pipeline(
|
||||
>>> prompt=[[
|
||||
>>> "A charming house in the countryside, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
|
||||
>>> "A dirt road in the countryside crossing pastures, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
|
||||
>>> "An old and rusty giant robot lying on a dirt road, by jakub rozalski, dark sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece"
|
||||
>>> ]],
|
||||
>>> tile_height=640,
|
||||
>>> tile_width=640,
|
||||
>>> tile_row_overlap=0,
|
||||
>>> tile_col_overlap=256,
|
||||
>>> guidance_scale=8,
|
||||
>>> seed=7178915308,
|
||||
>>> num_inference_steps=50,
|
||||
>>> )["images"][0]
|
||||
```
|
||||
"""
|
||||
|
||||
|
||||
def _tile2pixel_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
|
||||
"""Given a tile row and column numbers returns the range of pixels affected by that tiles in the overall image
|
||||
|
||||
Returns a tuple with:
|
||||
- Starting coordinates of rows in pixel space
|
||||
- Ending coordinates of rows in pixel space
|
||||
- Starting coordinates of columns in pixel space
|
||||
- Ending coordinates of columns in pixel space
|
||||
"""
|
||||
px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap)
|
||||
px_row_end = px_row_init + tile_height
|
||||
px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap)
|
||||
px_col_end = px_col_init + tile_width
|
||||
return px_row_init, px_row_end, px_col_init, px_col_end
|
||||
|
||||
|
||||
def _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end):
|
||||
"""Translates coordinates in pixel space to coordinates in latent space"""
|
||||
return px_row_init // 8, px_row_end // 8, px_col_init // 8, px_col_end // 8
|
||||
|
||||
|
||||
def _tile2latent_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
|
||||
"""Given a tile row and column numbers returns the range of latents affected by that tiles in the overall image
|
||||
|
||||
Returns a tuple with:
|
||||
- Starting coordinates of rows in latent space
|
||||
- Ending coordinates of rows in latent space
|
||||
- Starting coordinates of columns in latent space
|
||||
- Ending coordinates of columns in latent space
|
||||
"""
|
||||
px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
|
||||
tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
||||
)
|
||||
return _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end)
|
||||
|
||||
|
||||
def _tile2latent_exclusive_indices(
|
||||
tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, rows, columns
|
||||
):
|
||||
"""Given a tile row and column numbers returns the range of latents affected only by that tile in the overall image
|
||||
|
||||
Returns a tuple with:
|
||||
- Starting coordinates of rows in latent space
|
||||
- Ending coordinates of rows in latent space
|
||||
- Starting coordinates of columns in latent space
|
||||
- Ending coordinates of columns in latent space
|
||||
"""
|
||||
row_init, row_end, col_init, col_end = _tile2latent_indices(
|
||||
tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
||||
)
|
||||
row_segment = segment(row_init, row_end)
|
||||
col_segment = segment(col_init, col_end)
|
||||
# Iterate over the rest of tiles, clipping the region for the current tile
|
||||
for row in range(rows):
|
||||
for column in range(columns):
|
||||
if row != tile_row and column != tile_col:
|
||||
clip_row_init, clip_row_end, clip_col_init, clip_col_end = _tile2latent_indices(
|
||||
row, column, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
||||
)
|
||||
row_segment = row_segment - segment(clip_row_init, clip_row_end)
|
||||
col_segment = col_segment - segment(clip_col_init, clip_col_end)
|
||||
# return row_init, row_end, col_init, col_end
|
||||
return row_segment[0], row_segment[1], col_segment[0], col_segment[1]
|
||||
|
||||
|
||||
class StableDiffusionExtrasMixin:
|
||||
"""Mixin providing additional convenience method to Stable Diffusion pipelines"""
|
||||
|
||||
def decode_latents(self, latents, cpu_vae=False):
|
||||
"""Decodes a given array of latents into pixel space"""
|
||||
# scale and decode the image latents with vae
|
||||
if cpu_vae:
|
||||
lat = deepcopy(latents).cpu()
|
||||
vae = deepcopy(self.vae).cpu()
|
||||
else:
|
||||
lat = latents
|
||||
vae = self.vae
|
||||
|
||||
lat = 1 / 0.18215 * lat
|
||||
image = vae.decode(lat).sample
|
||||
|
||||
image = (image / 2 + 0.5).clamp(0, 1)
|
||||
image = image.cpu().permute(0, 2, 3, 1).numpy()
|
||||
|
||||
return self.numpy_to_pil(image)
|
||||
|
||||
|
||||
class StableDiffusionTilingPipeline(DiffusionPipeline, StableDiffusionExtrasMixin):
|
||||
def __init__(
|
||||
self,
|
||||
vae: AutoencoderKL,
|
||||
text_encoder: CLIPTextModel,
|
||||
tokenizer: CLIPTokenizer,
|
||||
unet: UNet2DConditionModel,
|
||||
scheduler: Union[DDIMScheduler, PNDMScheduler],
|
||||
safety_checker: StableDiffusionSafetyChecker,
|
||||
feature_extractor: CLIPFeatureExtractor,
|
||||
):
|
||||
super().__init__()
|
||||
self.register_modules(
|
||||
vae=vae,
|
||||
text_encoder=text_encoder,
|
||||
tokenizer=tokenizer,
|
||||
unet=unet,
|
||||
scheduler=scheduler,
|
||||
safety_checker=safety_checker,
|
||||
feature_extractor=feature_extractor,
|
||||
)
|
||||
|
||||
class SeedTilesMode(Enum):
|
||||
"""Modes in which the latents of a particular tile can be re-seeded"""
|
||||
|
||||
FULL = "full"
|
||||
EXCLUSIVE = "exclusive"
|
||||
|
||||
@torch.no_grad()
|
||||
def __call__(
|
||||
self,
|
||||
prompt: Union[str, List[List[str]]],
|
||||
num_inference_steps: Optional[int] = 50,
|
||||
guidance_scale: Optional[float] = 7.5,
|
||||
eta: Optional[float] = 0.0,
|
||||
seed: Optional[int] = None,
|
||||
tile_height: Optional[int] = 512,
|
||||
tile_width: Optional[int] = 512,
|
||||
tile_row_overlap: Optional[int] = 256,
|
||||
tile_col_overlap: Optional[int] = 256,
|
||||
guidance_scale_tiles: Optional[List[List[float]]] = None,
|
||||
seed_tiles: Optional[List[List[int]]] = None,
|
||||
seed_tiles_mode: Optional[Union[str, List[List[str]]]] = "full",
|
||||
seed_reroll_regions: Optional[List[Tuple[int, int, int, int, int]]] = None,
|
||||
cpu_vae: Optional[bool] = False,
|
||||
):
|
||||
r"""
|
||||
Function to run the diffusion pipeline with tiling support.
|
||||
|
||||
Args:
|
||||
prompt: either a single string (no tiling) or a list of lists with all the prompts to use (one list for each row of tiles). This will also define the tiling structure.
|
||||
num_inference_steps: number of diffusion steps.
guidance_scale: classifier-free guidance scale.
|
||||
seed: general random seed to initialize latents.
|
||||
tile_height: height in pixels of each grid tile.
|
||||
tile_width: width in pixels of each grid tile.
|
||||
tile_row_overlap: number of overlap pixels between tiles in consecutive rows.
|
||||
tile_col_overlap: number of overlap pixels between tiles in consecutive columns.
|
||||
guidance_scale_tiles: specific weights for classifier-free guidance in each tile. If None, the value provided in guidance_scale will be used.
|
||||
seed_tiles: specific seeds for the initialization latents in each tile. These will override the latents generated for the whole canvas using the standard seed parameter.
|
||||
seed_tiles_mode: either "full" or "exclusive". If "full", all the latents affected by the tile will be overridden. If "exclusive", only the latents that are affected exclusively by this tile (and no other tiles) will be overridden.
seed_reroll_regions: a list of tuples in the form (start row, end row, start column, end column, seed) defining regions in pixel space for which the latents will be overridden using the given seed. Takes priority over seed_tiles.
cpu_vae: the decoder from latent space to pixel space can require too much GPU RAM for large images. If you run into out-of-memory errors at the end of the generation process, try setting this parameter to True to run the decoder on the CPU. Slower, but it should run without memory issues.
|
||||
|
||||
Examples:
|
||||
|
||||
Returns:
|
||||
A PIL image with the generated image.
|
||||
|
||||
"""
|
||||
if not isinstance(prompt, list) or not all(isinstance(row, list) for row in prompt):
|
||||
raise ValueError(f"`prompt` has to be a list of lists but is {type(prompt)}")
|
||||
grid_rows = len(prompt)
|
||||
grid_cols = len(prompt[0])
|
||||
if not all(len(row) == grid_cols for row in prompt):
|
||||
raise ValueError("All prompt rows must have the same number of prompt columns")
|
||||
if not isinstance(seed_tiles_mode, str) and (
|
||||
not isinstance(seed_tiles_mode, list) or not all(isinstance(row, list) for row in seed_tiles_mode)
|
||||
):
|
||||
raise ValueError(f"`seed_tiles_mode` has to be a string or list of lists but is {type(prompt)}")
|
||||
if isinstance(seed_tiles_mode, str):
|
||||
seed_tiles_mode = [[seed_tiles_mode for _ in range(len(row))] for row in prompt]
|
||||
modes = [mode.value for mode in self.SeedTilesMode]
|
||||
if any(mode not in modes for row in seed_tiles_mode for mode in row):
|
||||
raise ValueError(f"Seed tiles mode must be one of {modes}")
|
||||
if seed_reroll_regions is None:
|
||||
seed_reroll_regions = []
|
||||
batch_size = 1
|
||||
|
||||
# create original noisy latents using the timesteps
|
||||
height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
|
||||
width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)
|
||||
latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
|
||||
generator = torch.Generator("cuda").manual_seed(seed)
|
||||
latents = torch.randn(latents_shape, generator=generator, device=self.device)
|
||||
|
||||
# overwrite latents for specific tiles if provided
|
||||
if seed_tiles is not None:
|
||||
for row in range(grid_rows):
|
||||
for col in range(grid_cols):
|
||||
if (seed_tile := seed_tiles[row][col]) is not None:
|
||||
mode = seed_tiles_mode[row][col]
|
||||
if mode == self.SeedTilesMode.FULL.value:
|
||||
row_init, row_end, col_init, col_end = _tile2latent_indices(
|
||||
row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
||||
)
|
||||
else:
|
||||
row_init, row_end, col_init, col_end = _tile2latent_exclusive_indices(
|
||||
row,
|
||||
col,
|
||||
tile_width,
|
||||
tile_height,
|
||||
tile_row_overlap,
|
||||
tile_col_overlap,
|
||||
grid_rows,
|
||||
grid_cols,
|
||||
)
|
||||
tile_generator = torch.Generator("cuda").manual_seed(seed_tile)
|
||||
tile_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
|
||||
latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
|
||||
tile_shape, generator=tile_generator, device=self.device
|
||||
)
|
||||
|
||||
# overwrite again for seed reroll regions
|
||||
for row_init, row_end, col_init, col_end, seed_reroll in seed_reroll_regions:
|
||||
row_init, row_end, col_init, col_end = _pixel2latent_indices(
|
||||
row_init, row_end, col_init, col_end
|
||||
) # to latent space coordinates
|
||||
reroll_generator = torch.Generator("cuda").manual_seed(seed_reroll)
|
||||
region_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
|
||||
latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
|
||||
region_shape, generator=reroll_generator, device=self.device
|
||||
)
|
||||
|
||||
# Prepare scheduler
|
||||
accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
|
||||
extra_set_kwargs = {}
|
||||
if accepts_offset:
|
||||
extra_set_kwargs["offset"] = 1
|
||||
self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
|
||||
# if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
|
||||
if isinstance(self.scheduler, LMSDiscreteScheduler):
|
||||
latents = latents * self.scheduler.sigmas[0]
|
||||
|
||||
# get prompts text embeddings
|
||||
text_input = [
|
||||
[
|
||||
self.tokenizer(
|
||||
col,
|
||||
padding="max_length",
|
||||
max_length=self.tokenizer.model_max_length,
|
||||
truncation=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
for col in row
|
||||
]
|
||||
for row in prompt
|
||||
]
|
||||
text_embeddings = [[self.text_encoder(col.input_ids.to(self.device))[0] for col in row] for row in text_input]
|
||||
|
||||
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
||||
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
||||
# corresponds to doing no classifier free guidance.
|
||||
do_classifier_free_guidance = guidance_scale > 1.0 # TODO: also active if any tile has guidance scale
|
||||
# get unconditional embeddings for classifier free guidance
|
||||
if do_classifier_free_guidance:
|
||||
for i in range(grid_rows):
|
||||
for j in range(grid_cols):
|
||||
max_length = text_input[i][j].input_ids.shape[-1]
|
||||
uncond_input = self.tokenizer(
|
||||
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
|
||||
)
|
||||
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
|
||||
|
||||
# For classifier free guidance, we need to do two forward passes.
|
||||
# Here we concatenate the unconditional and text embeddings into a single batch
|
||||
# to avoid doing two forward passes
|
||||
text_embeddings[i][j] = torch.cat([uncond_embeddings, text_embeddings[i][j]])
|
||||
|
||||
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
||||
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
||||
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
||||
# and should be between [0, 1]
|
||||
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
||||
extra_step_kwargs = {}
|
||||
if accepts_eta:
|
||||
extra_step_kwargs["eta"] = eta
|
||||
|
||||
# Mask for tile weight strength
|
||||
tile_weights = self._gaussian_weights(tile_width, tile_height, batch_size)
|
||||
|
||||
# Diffusion timesteps
|
||||
for i, t in tqdm(enumerate(self.scheduler.timesteps)):
|
||||
# Diffuse each tile
|
||||
noise_preds = []
|
||||
for row in range(grid_rows):
|
||||
noise_preds_row = []
|
||||
for col in range(grid_cols):
|
||||
px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
|
||||
row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
||||
)
|
||||
tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end]
|
||||
# expand the latents if we are doing classifier free guidance
|
||||
latent_model_input = torch.cat([tile_latents] * 2) if do_classifier_free_guidance else tile_latents
|
||||
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
||||
# predict the noise residual
|
||||
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings[row][col])[
|
||||
"sample"
|
||||
]
|
||||
# perform guidance
|
||||
if do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
||||
guidance = (
|
||||
guidance_scale
|
||||
if guidance_scale_tiles is None or guidance_scale_tiles[row][col] is None
|
||||
else guidance_scale_tiles[row][col]
|
||||
)
|
||||
noise_pred_tile = noise_pred_uncond + guidance * (noise_pred_text - noise_pred_uncond)
|
||||
noise_preds_row.append(noise_pred_tile)
|
||||
noise_preds.append(noise_preds_row)
|
||||
# Stitch noise predictions for all tiles
|
||||
noise_pred = torch.zeros(latents.shape, device=self.device)
|
||||
contributors = torch.zeros(latents.shape, device=self.device)
|
||||
# Add each tile contribution to overall latents
|
||||
for row in range(grid_rows):
|
||||
for col in range(grid_cols):
|
||||
px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
|
||||
row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
||||
)
|
||||
noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += (
|
||||
noise_preds[row][col] * tile_weights
|
||||
)
|
||||
contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights
|
||||
# Average overlapping areas with more than 1 contributor
|
||||
noise_pred /= contributors
|
||||
|
||||
# compute the previous noisy sample x_t -> x_t-1
|
||||
latents = self.scheduler.step(noise_pred, t, latents).prev_sample
|
||||
|
||||
# scale and decode the image latents with vae
|
||||
image = self.decode_latents(latents, cpu_vae)
|
||||
|
||||
return {"images": image}
|
||||
|
||||
def _gaussian_weights(self, tile_width, tile_height, nbatches):
|
||||
"""Generates a gaussian mask of weights for tile contributions"""
|
||||
import numpy as np
|
||||
from numpy import exp, pi, sqrt
|
||||
|
||||
latent_width = tile_width // 8
|
||||
latent_height = tile_height // 8
|
||||
|
||||
var = 0.01
|
||||
midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1
|
||||
x_probs = [
|
||||
exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
|
||||
for x in range(latent_width)
|
||||
]
|
||||
midpoint = latent_height / 2
|
||||
y_probs = [
|
||||
exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
|
||||
for y in range(latent_height)
|
||||
]
|
||||
|
||||
weights = np.outer(y_probs, x_probs)
|
||||
return torch.tile(torch.tensor(weights, device=self.device), (nbatches, self.unet.config.in_channels, 1, 1))
|
||||
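For reference, a small self-contained sketch of the tiling arithmetic used above; it simply re-applies the formulas from `__call__`, `_tile2pixel_indices`, and `_pixel2latent_indices` to the 1x3 grid from the docstring example:

```py
# Canvas and tile geometry for a 1x3 grid of 640x640 tiles with 256 px of column overlap.
tile_width, tile_height = 640, 640
tile_row_overlap, tile_col_overlap = 0, 256
grid_rows, grid_cols = 1, 3

# Overall canvas size, as computed at the start of __call__.
height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)
print(height, width)  # 640 1408

# Pixel and latent ranges covered by the last tile (row 0, col 2),
# per _tile2pixel_indices and _pixel2latent_indices.
px_col_init = 2 * (tile_width - tile_col_overlap)             # 768
px_col_end = px_col_init + tile_width                         # 1408
lat_col_init, lat_col_end = px_col_init // 8, px_col_end // 8
print(px_col_init, px_col_end, lat_col_init, lat_col_end)     # 768 1408 96 176
```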
405  examples/community/mixture_tiling.py  Normal file
@@ -0,0 +1,405 @@
|
||||
import inspect
|
||||
from copy import deepcopy
|
||||
from enum import Enum
|
||||
from typing import List, Optional, Tuple, Union
|
||||
|
||||
import torch
|
||||
from tqdm.auto import tqdm
|
||||
|
||||
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
||||
from diffusers.pipeline_utils import DiffusionPipeline
|
||||
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
|
||||
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
|
||||
from diffusers.utils import logging
|
||||
|
||||
|
||||
try:
|
||||
from ligo.segments import segment
|
||||
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
|
||||
except ImportError:
|
||||
raise ImportError("Please install transformers and ligo-segments to use the mixture pipeline")
|
||||
|
||||
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
||||
|
||||
EXAMPLE_DOC_STRING = """
|
||||
Examples:
|
||||
```py
|
||||
>>> from diffusers import LMSDiscreteScheduler, DiffusionPipeline
|
||||
|
||||
>>> scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
|
||||
>>> pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler, custom_pipeline="mixture_tiling")
|
||||
>>> pipeline.to("cuda")
|
||||
|
||||
>>> image = pipeline(
|
||||
>>> prompt=[[
|
||||
>>> "A charming house in the countryside, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
|
||||
>>> "A dirt road in the countryside crossing pastures, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
|
||||
>>> "An old and rusty giant robot lying on a dirt road, by jakub rozalski, dark sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece"
|
||||
>>> ]],
|
||||
>>> tile_height=640,
|
||||
>>> tile_width=640,
|
||||
>>> tile_row_overlap=0,
|
||||
>>> tile_col_overlap=256,
|
||||
>>> guidance_scale=8,
|
||||
>>> seed=7178915308,
|
||||
>>> num_inference_steps=50,
|
||||
>>> )["images"][0]
|
||||
```
|
||||
"""
|
||||
|
||||
|
||||
def _tile2pixel_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
|
||||
"""Given a tile row and column numbers returns the range of pixels affected by that tiles in the overall image
|
||||
|
||||
Returns a tuple with:
|
||||
- Starting coordinates of rows in pixel space
|
||||
- Ending coordinates of rows in pixel space
|
||||
- Starting coordinates of columns in pixel space
|
||||
- Ending coordinates of columns in pixel space
|
||||
"""
|
||||
px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap)
|
||||
px_row_end = px_row_init + tile_height
|
||||
px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap)
|
||||
px_col_end = px_col_init + tile_width
|
||||
return px_row_init, px_row_end, px_col_init, px_col_end
|
||||
|
||||
|
||||
def _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end):
|
||||
"""Translates coordinates in pixel space to coordinates in latent space"""
|
||||
return px_row_init // 8, px_row_end // 8, px_col_init // 8, px_col_end // 8
|
||||
|
||||
|
||||
def _tile2latent_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
|
||||
"""Given a tile row and column numbers returns the range of latents affected by that tiles in the overall image
|
||||
|
||||
Returns a tuple with:
|
||||
- Starting coordinates of rows in latent space
|
||||
- Ending coordinates of rows in latent space
|
||||
- Starting coordinates of columns in latent space
|
||||
- Ending coordinates of columns in latent space
|
||||
"""
|
||||
px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
|
||||
tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
||||
)
|
||||
return _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end)
|
||||
|
||||
|
||||
def _tile2latent_exclusive_indices(
|
||||
tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, rows, columns
|
||||
):
|
||||
"""Given a tile row and column numbers returns the range of latents affected only by that tile in the overall image
|
||||
|
||||
Returns a tuple with:
|
||||
- Starting coordinates of rows in latent space
|
||||
- Ending coordinates of rows in latent space
|
||||
- Starting coordinates of columns in latent space
|
||||
- Ending coordinates of columns in latent space
|
||||
"""
|
||||
row_init, row_end, col_init, col_end = _tile2latent_indices(
|
||||
tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
||||
)
|
||||
row_segment = segment(row_init, row_end)
|
||||
col_segment = segment(col_init, col_end)
|
||||
# Iterate over the rest of tiles, clipping the region for the current tile
|
||||
for row in range(rows):
|
||||
for column in range(columns):
|
||||
if row != tile_row and column != tile_col:
|
||||
clip_row_init, clip_row_end, clip_col_init, clip_col_end = _tile2latent_indices(
|
||||
row, column, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
||||
)
|
||||
row_segment = row_segment - segment(clip_row_init, clip_row_end)
|
||||
col_segment = col_segment - segment(clip_col_init, clip_col_end)
|
||||
# return row_init, row_end, col_init, col_end
|
||||
return row_segment[0], row_segment[1], col_segment[0], col_segment[1]
|
||||
|
||||
|
||||
class StableDiffusionExtrasMixin:
|
||||
"""Mixin providing additional convenience method to Stable Diffusion pipelines"""
|
||||
|
||||
def decode_latents(self, latents, cpu_vae=False):
|
||||
"""Decodes a given array of latents into pixel space"""
|
||||
# scale and decode the image latents with vae
|
||||
if cpu_vae:
|
||||
lat = deepcopy(latents).cpu()
|
||||
vae = deepcopy(self.vae).cpu()
|
||||
else:
|
||||
lat = latents
|
||||
vae = self.vae
|
||||
|
||||
lat = 1 / 0.18215 * lat
|
||||
image = vae.decode(lat).sample
|
||||
|
||||
image = (image / 2 + 0.5).clamp(0, 1)
|
||||
image = image.cpu().permute(0, 2, 3, 1).numpy()
|
||||
|
||||
return self.numpy_to_pil(image)
|
||||
|
||||
|
||||
class StableDiffusionTilingPipeline(DiffusionPipeline, StableDiffusionExtrasMixin):
|
||||
def __init__(
|
||||
self,
|
||||
vae: AutoencoderKL,
|
||||
text_encoder: CLIPTextModel,
|
||||
tokenizer: CLIPTokenizer,
|
||||
unet: UNet2DConditionModel,
|
||||
scheduler: Union[DDIMScheduler, PNDMScheduler],
|
||||
safety_checker: StableDiffusionSafetyChecker,
|
||||
feature_extractor: CLIPFeatureExtractor,
|
||||
):
|
||||
super().__init__()
|
||||
self.register_modules(
|
||||
vae=vae,
|
||||
text_encoder=text_encoder,
|
||||
tokenizer=tokenizer,
|
||||
unet=unet,
|
||||
scheduler=scheduler,
|
||||
safety_checker=safety_checker,
|
||||
feature_extractor=feature_extractor,
|
||||
)
|
||||
|
||||
class SeedTilesMode(Enum):
|
||||
"""Modes in which the latents of a particular tile can be re-seeded"""
|
||||
|
||||
FULL = "full"
|
||||
EXCLUSIVE = "exclusive"
|
||||
|
||||
@torch.no_grad()
|
||||
def __call__(
|
||||
self,
|
||||
prompt: Union[str, List[List[str]]],
|
||||
num_inference_steps: Optional[int] = 50,
|
||||
guidance_scale: Optional[float] = 7.5,
|
||||
eta: Optional[float] = 0.0,
|
||||
seed: Optional[int] = None,
|
||||
tile_height: Optional[int] = 512,
|
||||
tile_width: Optional[int] = 512,
|
||||
tile_row_overlap: Optional[int] = 256,
|
||||
tile_col_overlap: Optional[int] = 256,
|
||||
guidance_scale_tiles: Optional[List[List[float]]] = None,
|
||||
seed_tiles: Optional[List[List[int]]] = None,
|
||||
seed_tiles_mode: Optional[Union[str, List[List[str]]]] = "full",
|
||||
seed_reroll_regions: Optional[List[Tuple[int, int, int, int, int]]] = None,
|
||||
cpu_vae: Optional[bool] = False,
|
||||
):
|
||||
r"""
|
||||
Function to run the diffusion pipeline with tiling support.
|
||||
|
||||
Args:
|
||||
prompt: either a single string (no tiling) or a list of lists with all the prompts to use (one list for each row of tiles). This will also define the tiling structure.
|
||||
num_inference_steps: number of diffusion steps.
guidance_scale: classifier-free guidance scale.
|
||||
seed: general random seed to initialize latents.
|
||||
tile_height: height in pixels of each grid tile.
|
||||
tile_width: width in pixels of each grid tile.
|
||||
tile_row_overlap: number of overlap pixels between tiles in consecutive rows.
|
||||
tile_col_overlap: number of overlap pixels between tiles in consecutive columns.
|
||||
guidance_scale_tiles: specific weights for classifier-free guidance in each tile. If None, the value provided in guidance_scale will be used.
|
||||
seed_tiles: specific seeds for the initialization latents in each tile. These will override the latents generated for the whole canvas using the standard seed parameter.
|
||||
seed_tiles_mode: either "full" or "exclusive". If "full", all the latents affected by the tile will be overridden. If "exclusive", only the latents that are affected exclusively by this tile (and no other tiles) will be overridden.
seed_reroll_regions: a list of tuples in the form (start row, end row, start column, end column, seed) defining regions in pixel space for which the latents will be overridden using the given seed. Takes priority over seed_tiles.
cpu_vae: the decoder from latent space to pixel space can require too much GPU RAM for large images. If you run into out-of-memory errors at the end of the generation process, try setting this parameter to True to run the decoder on the CPU. Slower, but it should run without memory issues.
|
||||
|
||||
Examples:
|
||||
|
||||
Returns:
|
||||
A PIL image with the generated image.
|
||||
|
||||
"""
|
||||
if not isinstance(prompt, list) or not all(isinstance(row, list) for row in prompt):
|
||||
raise ValueError(f"`prompt` has to be a list of lists but is {type(prompt)}")
|
||||
grid_rows = len(prompt)
|
||||
grid_cols = len(prompt[0])
|
||||
if not all(len(row) == grid_cols for row in prompt):
|
||||
raise ValueError("All prompt rows must have the same number of prompt columns")
|
||||
if not isinstance(seed_tiles_mode, str) and (
|
||||
not isinstance(seed_tiles_mode, list) or not all(isinstance(row, list) for row in seed_tiles_mode)
|
||||
):
|
||||
raise ValueError(f"`seed_tiles_mode` has to be a string or list of lists but is {type(prompt)}")
|
||||
if isinstance(seed_tiles_mode, str):
|
||||
seed_tiles_mode = [[seed_tiles_mode for _ in range(len(row))] for row in prompt]
|
||||
|
||||
modes = [mode.value for mode in self.SeedTilesMode]
|
||||
if any(mode not in modes for row in seed_tiles_mode for mode in row):
|
||||
raise ValueError(f"Seed tiles mode must be one of {modes}")
|
||||
if seed_reroll_regions is None:
|
||||
seed_reroll_regions = []
|
||||
batch_size = 1
|
||||
|
||||
# create original noisy latents using the timesteps
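# The canvas grows with each extra tile minus the overlap: e.g. 3 rows of 512-px tiles with a 256-px row overlap give 512 + 2 * (512 - 256) = 1024 px of height.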
|
||||
height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
|
||||
width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)
|
||||
latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
|
||||
generator = torch.Generator("cuda").manual_seed(seed)
|
||||
latents = torch.randn(latents_shape, generator=generator, device=self.device)
|
||||
|
||||
# overwrite latents for specific tiles if provided
|
||||
if seed_tiles is not None:
|
||||
for row in range(grid_rows):
|
||||
for col in range(grid_cols):
|
||||
if (seed_tile := seed_tiles[row][col]) is not None:
|
||||
mode = seed_tiles_mode[row][col]
|
||||
if mode == self.SeedTilesMode.FULL.value:
|
||||
row_init, row_end, col_init, col_end = _tile2latent_indices(
|
||||
row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
||||
)
|
||||
else:
|
||||
row_init, row_end, col_init, col_end = _tile2latent_exclusive_indices(
|
||||
row,
|
||||
col,
|
||||
tile_width,
|
||||
tile_height,
|
||||
tile_row_overlap,
|
||||
tile_col_overlap,
|
||||
grid_rows,
|
||||
grid_cols,
|
||||
)
|
||||
tile_generator = torch.Generator("cuda").manual_seed(seed_tile)
|
||||
tile_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
|
||||
latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
|
||||
tile_shape, generator=tile_generator, device=self.device
|
||||
)
|
||||
|
||||
# overwrite again for seed reroll regions
|
||||
for row_init, row_end, col_init, col_end, seed_reroll in seed_reroll_regions:
|
||||
row_init, row_end, col_init, col_end = _pixel2latent_indices(
|
||||
row_init, row_end, col_init, col_end
|
||||
) # to latent space coordinates
|
||||
reroll_generator = torch.Generator("cuda").manual_seed(seed_reroll)
|
||||
region_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
|
||||
latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
|
||||
region_shape, generator=reroll_generator, device=self.device
|
||||
)
|
||||
|
||||
# Prepare scheduler
|
||||
accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
|
||||
extra_set_kwargs = {}
|
||||
if accepts_offset:
|
||||
extra_set_kwargs["offset"] = 1
|
||||
self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
|
||||
# if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
|
||||
if isinstance(self.scheduler, LMSDiscreteScheduler):
|
||||
latents = latents * self.scheduler.sigmas[0]
|
||||
|
||||
# get prompts text embeddings
|
||||
text_input = [
|
||||
[
|
||||
self.tokenizer(
|
||||
col,
|
||||
padding="max_length",
|
||||
max_length=self.tokenizer.model_max_length,
|
||||
truncation=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
for col in row
|
||||
]
|
||||
for row in prompt
|
||||
]
|
||||
text_embeddings = [[self.text_encoder(col.input_ids.to(self.device))[0] for col in row] for row in text_input]
|
||||
|
||||
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
||||
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
||||
# corresponds to doing no classifier free guidance.
|
||||
do_classifier_free_guidance = guidance_scale > 1.0 # TODO: also active if any tile has guidance scale
|
||||
# get unconditional embeddings for classifier free guidance
|
||||
if do_classifier_free_guidance:
|
||||
for i in range(grid_rows):
|
||||
for j in range(grid_cols):
|
||||
max_length = text_input[i][j].input_ids.shape[-1]
|
||||
uncond_input = self.tokenizer(
|
||||
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
|
||||
)
|
||||
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
|
||||
|
||||
# For classifier free guidance, we need to do two forward passes.
|
||||
# Here we concatenate the unconditional and text embeddings into a single batch
|
||||
# to avoid doing two forward passes
|
||||
text_embeddings[i][j] = torch.cat([uncond_embeddings, text_embeddings[i][j]])
|
||||
|
||||
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
||||
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
||||
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
||||
# and should be between [0, 1]
|
||||
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
||||
extra_step_kwargs = {}
|
||||
if accepts_eta:
|
||||
extra_step_kwargs["eta"] = eta
|
||||
|
||||
# Mask for tile weight strength
|
||||
tile_weights = self._gaussian_weights(tile_width, tile_height, batch_size)
|
||||
|
||||
# Diffusion timesteps
|
||||
for i, t in tqdm(enumerate(self.scheduler.timesteps)):
|
||||
# Diffuse each tile
|
||||
noise_preds = []
|
||||
for row in range(grid_rows):
|
||||
noise_preds_row = []
|
||||
for col in range(grid_cols):
|
||||
px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
|
||||
row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
||||
)
|
||||
tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end]
|
||||
# expand the latents if we are doing classifier free guidance
|
||||
latent_model_input = torch.cat([tile_latents] * 2) if do_classifier_free_guidance else tile_latents
|
||||
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
||||
# predict the noise residual
|
||||
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings[row][col])[
|
||||
"sample"
|
||||
]
|
||||
# perform guidance
|
||||
if do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
||||
guidance = (
|
||||
guidance_scale
|
||||
if guidance_scale_tiles is None or guidance_scale_tiles[row][col] is None
|
||||
else guidance_scale_tiles[row][col]
|
||||
)
|
||||
noise_pred_tile = noise_pred_uncond + guidance * (noise_pred_text - noise_pred_uncond)
|
||||
noise_preds_row.append(noise_pred_tile)
|
||||
noise_preds.append(noise_preds_row)
|
||||
# Stitch noise predictions for all tiles
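# Each latent position accumulates a weighted sum of the predictions from every tile covering it; dividing by the summed weights below turns this into a weighted average, which avoids visible seams in the overlap regions.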
|
||||
noise_pred = torch.zeros(latents.shape, device=self.device)
|
||||
contributors = torch.zeros(latents.shape, device=self.device)
|
||||
# Add each tile contribution to overall latents
|
||||
for row in range(grid_rows):
|
||||
for col in range(grid_cols):
|
||||
px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
|
||||
row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
||||
)
|
||||
noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += (
|
||||
noise_preds[row][col] * tile_weights
|
||||
)
|
||||
contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights
|
||||
# Average overlapping areas with more than 1 contributor
|
||||
noise_pred /= contributors
|
||||
|
||||
# compute the previous noisy sample x_t -> x_t-1
|
||||
latents = self.scheduler.step(noise_pred, t, latents).prev_sample
|
||||
|
||||
# scale and decode the image latents with vae
|
||||
image = self.decode_latents(latents, cpu_vae)
|
||||
|
||||
return {"images": image}
|
||||
|
||||
def _gaussian_weights(self, tile_width, tile_height, nbatches):
|
||||
"""Generates a gaussian mask of weights for tile contributions"""
|
||||
import numpy as np
|
||||
from numpy import exp, pi, sqrt
|
||||
|
||||
latent_width = tile_width // 8
|
||||
latent_height = tile_height // 8
|
||||
|
||||
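# The weights form a Gaussian bump centred on the tile midpoint, so each tile contributes most near its centre and fades towards its borders before the contributions are averaged. The variance below is a fixed heuristic.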
var = 0.01
|
||||
midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1
|
||||
x_probs = [
|
||||
exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
|
||||
for x in range(latent_width)
|
||||
]
|
||||
midpoint = latent_height / 2
|
||||
y_probs = [
|
||||
exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
|
||||
for y in range(latent_height)
|
||||
]
|
||||
|
||||
weights = np.outer(y_probs, x_probs)
|
||||
return torch.tile(torch.tensor(weights, device=self.device), (nbatches, self.unet.config.in_channels, 1, 1))
|
||||
@@ -1,7 +1,7 @@
|
||||
# Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
|
||||
|
||||
import inspect
|
||||
from typing import Any, Callable, Dict, List, Optional, Union
|
||||
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
||||
|
||||
import numpy as np
|
||||
import PIL.Image
|
||||
@@ -11,6 +11,7 @@ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
||||
|
||||
from diffusers import AutoencoderKL, ControlNetModel, DiffusionPipeline, UNet2DConditionModel, logging
|
||||
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
|
||||
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
|
||||
from diffusers.schedulers import KarrasDiffusionSchedulers
|
||||
from diffusers.utils import (
|
||||
PIL_INTERPOLATION,
|
||||
@@ -184,7 +185,14 @@ def prepare_mask_image(mask_image):
|
||||
|
||||
|
||||
def prepare_controlnet_conditioning_image(
|
||||
controlnet_conditioning_image, width, height, batch_size, num_images_per_prompt, device, dtype
|
||||
controlnet_conditioning_image,
|
||||
width,
|
||||
height,
|
||||
batch_size,
|
||||
num_images_per_prompt,
|
||||
device,
|
||||
dtype,
|
||||
do_classifier_free_guidance,
|
||||
):
|
||||
if not isinstance(controlnet_conditioning_image, torch.Tensor):
|
||||
if isinstance(controlnet_conditioning_image, PIL.Image.Image):
|
||||
@@ -214,6 +222,9 @@ def prepare_controlnet_conditioning_image(
|
||||
|
||||
controlnet_conditioning_image = controlnet_conditioning_image.to(device=device, dtype=dtype)
|
||||
|
||||
if do_classifier_free_guidance:
|
||||
controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2)
|
||||
|
||||
return controlnet_conditioning_image
|
||||
|
||||
|
||||
@@ -230,7 +241,7 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
|
||||
text_encoder: CLIPTextModel,
|
||||
tokenizer: CLIPTokenizer,
|
||||
unet: UNet2DConditionModel,
|
||||
controlnet: ControlNetModel,
|
||||
controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
|
||||
scheduler: KarrasDiffusionSchedulers,
|
||||
safety_checker: StableDiffusionSafetyChecker,
|
||||
feature_extractor: CLIPImageProcessor,
|
||||
@@ -254,6 +265,9 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
|
||||
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
||||
)
|
||||
|
||||
if isinstance(controlnet, (list, tuple)):
|
||||
controlnet = MultiControlNetModel(controlnet)
|
||||
|
||||
self.register_modules(
|
||||
vae=vae,
|
||||
text_encoder=text_encoder,
|
||||
@@ -264,6 +278,7 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
|
||||
safety_checker=safety_checker,
|
||||
feature_extractor=feature_extractor,
|
||||
)
|
||||
|
||||
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
||||
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
||||
|
||||
@@ -522,6 +537,42 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
|
||||
extra_step_kwargs["generator"] = generator
|
||||
return extra_step_kwargs
|
||||
|
||||
def check_controlnet_conditioning_image(self, image, prompt, prompt_embeds):
|
||||
image_is_pil = isinstance(image, PIL.Image.Image)
|
||||
image_is_tensor = isinstance(image, torch.Tensor)
|
||||
image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
|
||||
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
|
||||
|
||||
if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:
|
||||
raise TypeError(
|
||||
"image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors"
|
||||
)
|
||||
|
||||
if image_is_pil:
|
||||
image_batch_size = 1
|
||||
elif image_is_tensor:
|
||||
image_batch_size = image.shape[0]
|
||||
elif image_is_pil_list:
|
||||
image_batch_size = len(image)
|
||||
elif image_is_tensor_list:
|
||||
image_batch_size = len(image)
|
||||
else:
|
||||
raise ValueError("controlnet condition image is not valid")
|
||||
|
||||
if prompt is not None and isinstance(prompt, str):
|
||||
prompt_batch_size = 1
|
||||
elif prompt is not None and isinstance(prompt, list):
|
||||
prompt_batch_size = len(prompt)
|
||||
elif prompt_embeds is not None:
|
||||
prompt_batch_size = prompt_embeds.shape[0]
|
||||
else:
|
||||
raise ValueError("prompt or prompt_embeds are not valid")
|
||||
|
||||
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
|
||||
raise ValueError(
|
||||
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
|
||||
)
|
||||
|
||||
def check_inputs(
|
||||
self,
|
||||
prompt,
|
||||
@@ -534,6 +585,7 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
|
||||
negative_prompt=None,
|
||||
prompt_embeds=None,
|
||||
negative_prompt_embeds=None,
|
||||
controlnet_conditioning_scale=None,
|
||||
):
|
||||
if height % 8 != 0 or width % 8 != 0:
|
||||
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
||||
@@ -572,45 +624,35 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
|
||||
f" {negative_prompt_embeds.shape}."
|
||||
)
|
||||
|
||||
controlnet_cond_image_is_pil = isinstance(controlnet_conditioning_image, PIL.Image.Image)
|
||||
controlnet_cond_image_is_tensor = isinstance(controlnet_conditioning_image, torch.Tensor)
|
||||
controlnet_cond_image_is_pil_list = isinstance(controlnet_conditioning_image, list) and isinstance(
|
||||
controlnet_conditioning_image[0], PIL.Image.Image
|
||||
)
|
||||
controlnet_cond_image_is_tensor_list = isinstance(controlnet_conditioning_image, list) and isinstance(
|
||||
controlnet_conditioning_image[0], torch.Tensor
|
||||
)
|
||||
# check controlnet condition image
|
||||
if isinstance(self.controlnet, ControlNetModel):
|
||||
self.check_controlnet_conditioning_image(controlnet_conditioning_image, prompt, prompt_embeds)
|
||||
elif isinstance(self.controlnet, MultiControlNetModel):
|
||||
if not isinstance(controlnet_conditioning_image, list):
|
||||
raise TypeError("For multiple controlnets: `image` must be type `list`")
|
||||
if len(controlnet_conditioning_image) != len(self.controlnet.nets):
|
||||
raise ValueError(
|
||||
"For multiple controlnets: `image` must have the same length as the number of controlnets."
|
||||
)
|
||||
for image_ in controlnet_conditioning_image:
|
||||
self.check_controlnet_conditioning_image(image_, prompt, prompt_embeds)
|
||||
else:
|
||||
assert False
|
||||
|
||||
if (
|
||||
not controlnet_cond_image_is_pil
|
||||
and not controlnet_cond_image_is_tensor
|
||||
and not controlnet_cond_image_is_pil_list
|
||||
and not controlnet_cond_image_is_tensor_list
|
||||
):
|
||||
raise TypeError(
|
||||
"image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors"
|
||||
)
|
||||
|
||||
if controlnet_cond_image_is_pil:
|
||||
controlnet_cond_image_batch_size = 1
|
||||
elif controlnet_cond_image_is_tensor:
|
||||
controlnet_cond_image_batch_size = controlnet_conditioning_image.shape[0]
|
||||
elif controlnet_cond_image_is_pil_list:
|
||||
controlnet_cond_image_batch_size = len(controlnet_conditioning_image)
|
||||
elif controlnet_cond_image_is_tensor_list:
|
||||
controlnet_cond_image_batch_size = len(controlnet_conditioning_image)
|
||||
|
||||
if prompt is not None and isinstance(prompt, str):
|
||||
prompt_batch_size = 1
|
||||
elif prompt is not None and isinstance(prompt, list):
|
||||
prompt_batch_size = len(prompt)
|
||||
elif prompt_embeds is not None:
|
||||
prompt_batch_size = prompt_embeds.shape[0]
|
||||
|
||||
if controlnet_cond_image_batch_size != 1 and controlnet_cond_image_batch_size != prompt_batch_size:
|
||||
raise ValueError(
|
||||
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {controlnet_cond_image_batch_size}, prompt batch size: {prompt_batch_size}"
|
||||
)
|
||||
# Check `controlnet_conditioning_scale`
|
||||
if isinstance(self.controlnet, ControlNetModel):
|
||||
if not isinstance(controlnet_conditioning_scale, float):
|
||||
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
|
||||
elif isinstance(self.controlnet, MultiControlNetModel):
|
||||
if isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
|
||||
self.controlnet.nets
|
||||
):
|
||||
raise ValueError(
|
||||
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
|
||||
" the same length as the number of controlnets"
|
||||
)
|
||||
else:
|
||||
assert False
|
||||
|
||||
if isinstance(image, torch.Tensor) and not isinstance(mask_image, torch.Tensor):
|
||||
raise TypeError("if `image` is a tensor, `mask_image` must also be a tensor")
|
||||
@@ -630,6 +672,8 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
|
||||
image_channels, image_height, image_width = image.shape
|
||||
elif image.ndim == 4:
|
||||
image_batch_size, image_channels, image_height, image_width = image.shape
|
||||
else:
|
||||
assert False
|
||||
|
||||
if mask_image.ndim == 2:
|
||||
mask_image_batch_size = 1
|
||||
@@ -797,7 +841,7 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
|
||||
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
||||
callback_steps: int = 1,
|
||||
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
||||
controlnet_conditioning_scale: float = 1.0,
|
||||
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
|
||||
):
|
||||
r"""
|
||||
Function invoked when calling the pipeline for generation.
|
||||
@@ -897,6 +941,7 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
|
||||
negative_prompt,
|
||||
prompt_embeds,
|
||||
negative_prompt_embeds,
|
||||
controlnet_conditioning_scale,
|
||||
)
|
||||
|
||||
# 2. Define call parameters
|
||||
@@ -913,6 +958,9 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
|
||||
# corresponds to doing no classifier free guidance.
|
||||
do_classifier_free_guidance = guidance_scale > 1.0
|
||||
|
||||
if isinstance(self.controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
||||
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(self.controlnet.nets)
|
||||
|
||||
# 3. Encode input prompt
|
||||
prompt_embeds = self._encode_prompt(
|
||||
prompt,
|
||||
@@ -929,15 +977,37 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
|
||||
|
||||
mask_image = prepare_mask_image(mask_image)
|
||||
|
||||
controlnet_conditioning_image = prepare_controlnet_conditioning_image(
|
||||
controlnet_conditioning_image,
|
||||
width,
|
||||
height,
|
||||
batch_size * num_images_per_prompt,
|
||||
num_images_per_prompt,
|
||||
device,
|
||||
self.controlnet.dtype,
|
||||
)
|
||||
# condition image(s)
|
||||
if isinstance(self.controlnet, ControlNetModel):
|
||||
controlnet_conditioning_image = prepare_controlnet_conditioning_image(
|
||||
controlnet_conditioning_image=controlnet_conditioning_image,
|
||||
width=width,
|
||||
height=height,
|
||||
batch_size=batch_size * num_images_per_prompt,
|
||||
num_images_per_prompt=num_images_per_prompt,
|
||||
device=device,
|
||||
dtype=self.controlnet.dtype,
|
||||
do_classifier_free_guidance=do_classifier_free_guidance,
|
||||
)
|
||||
elif isinstance(self.controlnet, MultiControlNetModel):
|
||||
controlnet_conditioning_images = []
|
||||
|
||||
for image_ in controlnet_conditioning_image:
|
||||
image_ = prepare_controlnet_conditioning_image(
|
||||
controlnet_conditioning_image=image_,
|
||||
width=width,
|
||||
height=height,
|
||||
batch_size=batch_size * num_images_per_prompt,
|
||||
num_images_per_prompt=num_images_per_prompt,
|
||||
device=device,
|
||||
dtype=self.controlnet.dtype,
|
||||
do_classifier_free_guidance=do_classifier_free_guidance,
|
||||
)
|
||||
controlnet_conditioning_images.append(image_)
|
||||
|
||||
controlnet_conditioning_image = controlnet_conditioning_images
|
||||
else:
|
||||
assert False
|
||||
|
||||
masked_image = image * (mask_image < 0.5)
|
||||
|
||||
@@ -979,9 +1049,6 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
|
||||
do_classifier_free_guidance,
|
||||
)
|
||||
|
||||
if do_classifier_free_guidance:
|
||||
controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2)
|
||||
|
||||
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
||||
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
||||
|
||||
@@ -1007,15 +1074,10 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
|
||||
t,
|
||||
encoder_hidden_states=prompt_embeds,
|
||||
controlnet_cond=controlnet_conditioning_image,
|
||||
conditioning_scale=controlnet_conditioning_scale,
|
||||
return_dict=False,
|
||||
)
|
||||
|
||||
down_block_res_samples = [
|
||||
down_block_res_sample * controlnet_conditioning_scale
|
||||
for down_block_res_sample in down_block_res_samples
|
||||
]
|
||||
mid_block_res_sample *= controlnet_conditioning_scale
|
||||
|
||||
# predict the noise residual
|
||||
noise_pred = self.unet(
|
||||
inpainting_latent_model_input,
|
||||
|
||||
examples/community/stable_diffusion_controlnet_reference.py (new file, 822 lines)
@@ -0,0 +1,822 @@
|
||||
# Inspired by: https://github.com/Mikubill/sd-webui-controlnet/discussions/1236 and https://github.com/Mikubill/sd-webui-controlnet/discussions/1280
|
||||
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
||||
|
||||
import PIL.Image
|
||||
import torch
|
||||
|
||||
from diffusers import StableDiffusionControlNetPipeline
|
||||
from diffusers.models import ControlNetModel
|
||||
from diffusers.models.attention import BasicTransformerBlock
|
||||
from diffusers.models.unet_2d_blocks import CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, UpBlock2D
|
||||
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
|
||||
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
||||
from diffusers.utils import is_compiled_module, logging, randn_tensor
|
||||
|
||||
|
||||
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
||||
|
||||
EXAMPLE_DOC_STRING = """
|
||||
Examples:
|
||||
```py
|
||||
>>> import cv2
|
||||
>>> import torch
|
||||
>>> import numpy as np
|
||||
>>> from PIL import Image
|
||||
>>> from diffusers import ControlNetModel, UniPCMultistepScheduler
|
||||
>>> from diffusers.utils import load_image
|
||||
|
||||
>>> input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")
|
||||
|
||||
>>> # get canny image
|
||||
>>> image = cv2.Canny(np.array(input_image), 100, 200)
|
||||
>>> image = image[:, :, None]
|
||||
>>> image = np.concatenate([image, image, image], axis=2)
|
||||
>>> canny_image = Image.fromarray(image)
|
||||
|
||||
>>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
|
||||
>>> pipe = StableDiffusionControlNetReferencePipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
controlnet=controlnet,
|
||||
safety_checker=None,
|
||||
torch_dtype=torch.float16
|
||||
).to('cuda:0')
|
||||
|
||||
>>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
|
||||
|
||||
>>> result_img = pipe(ref_image=input_image,
|
||||
prompt="1girl",
|
||||
image=canny_image,
|
||||
num_inference_steps=20,
|
||||
reference_attn=True,
|
||||
reference_adain=True).images[0]
|
||||
|
||||
>>> result_img.show()
|
||||
```
|
||||
"""
|
||||
|
||||
|
||||
def torch_dfs(model: torch.nn.Module):
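"""Return the module and all of its sub-modules, gathered via a depth-first traversal."""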
|
||||
result = [model]
|
||||
for child in model.children():
|
||||
result += torch_dfs(child)
|
||||
return result
|
||||
|
||||
|
||||
class StableDiffusionControlNetReferencePipeline(StableDiffusionControlNetPipeline):
|
||||
def prepare_ref_latents(self, refimage, batch_size, dtype, device, generator, do_classifier_free_guidance):
|
||||
refimage = refimage.to(device=device, dtype=dtype)
|
||||
|
||||
# encode the reference image into the latent space (it will later be renoised and passed through the UNet in "write" mode)
|
||||
if isinstance(generator, list):
|
||||
ref_image_latents = [
|
||||
self.vae.encode(refimage[i : i + 1]).latent_dist.sample(generator=generator[i])
|
||||
for i in range(batch_size)
|
||||
]
|
||||
ref_image_latents = torch.cat(ref_image_latents, dim=0)
|
||||
else:
|
||||
ref_image_latents = self.vae.encode(refimage).latent_dist.sample(generator=generator)
|
||||
ref_image_latents = self.vae.config.scaling_factor * ref_image_latents
|
||||
|
||||
# duplicate ref_image_latents for each generation per prompt, using mps friendly method
|
||||
if ref_image_latents.shape[0] < batch_size:
|
||||
if not batch_size % ref_image_latents.shape[0] == 0:
|
||||
raise ValueError(
|
||||
"The passed images and the required batch size don't match. Images are supposed to be duplicated"
|
||||
f" to a total batch size of {batch_size}, but {ref_image_latents.shape[0]} images were passed."
|
||||
" Make sure the number of images that you pass is divisible by the total requested batch size."
|
||||
)
|
||||
ref_image_latents = ref_image_latents.repeat(batch_size // ref_image_latents.shape[0], 1, 1, 1)
|
||||
|
||||
ref_image_latents = torch.cat([ref_image_latents] * 2) if do_classifier_free_guidance else ref_image_latents
|
||||
|
||||
# aligning device to prevent device errors when concatenating it with the latent model input
|
||||
ref_image_latents = ref_image_latents.to(device=device, dtype=dtype)
|
||||
return ref_image_latents
|
||||
|
||||
@torch.no_grad()
|
||||
def __call__(
|
||||
self,
|
||||
prompt: Union[str, List[str]] = None,
|
||||
image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] = None,
|
||||
ref_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
||||
height: Optional[int] = None,
|
||||
width: Optional[int] = None,
|
||||
num_inference_steps: int = 50,
|
||||
guidance_scale: float = 7.5,
|
||||
negative_prompt: Optional[Union[str, List[str]]] = None,
|
||||
num_images_per_prompt: Optional[int] = 1,
|
||||
eta: float = 0.0,
|
||||
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
||||
latents: Optional[torch.FloatTensor] = None,
|
||||
prompt_embeds: Optional[torch.FloatTensor] = None,
|
||||
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
||||
output_type: Optional[str] = "pil",
|
||||
return_dict: bool = True,
|
||||
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
||||
callback_steps: int = 1,
|
||||
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
||||
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
|
||||
guess_mode: bool = False,
|
||||
attention_auto_machine_weight: float = 1.0,
|
||||
gn_auto_machine_weight: float = 1.0,
|
||||
style_fidelity: float = 0.5,
|
||||
reference_attn: bool = True,
|
||||
reference_adain: bool = True,
|
||||
):
|
||||
r"""
|
||||
Function invoked when calling the pipeline for generation.
|
||||
|
||||
Args:
|
||||
prompt (`str` or `List[str]`, *optional*):
|
||||
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead.
|
||||
image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`,
|
||||
`List[List[torch.FloatTensor]]`, or `List[List[PIL.Image.Image]]`):
|
||||
The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
|
||||
the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
|
||||
also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
|
||||
height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
|
||||
specified in init, images must be passed as a list such that each element of the list can be correctly
|
||||
batched for input to a single controlnet.
|
||||
ref_image (`torch.FloatTensor`, `PIL.Image.Image`):
|
||||
The Reference Control input condition. Reference Control uses this input condition to generate guidance to Unet. If
|
||||
the type is specified as `Torch.FloatTensor`, it is passed to Reference Control as is. `PIL.Image.Image` can
|
||||
also be accepted as an image.
|
||||
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
||||
The height in pixels of the generated image.
|
||||
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
||||
The width in pixels of the generated image.
|
||||
num_inference_steps (`int`, *optional*, defaults to 50):
|
||||
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
||||
expense of slower inference.
|
||||
guidance_scale (`float`, *optional*, defaults to 7.5):
|
||||
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
||||
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
||||
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
||||
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
||||
usually at the expense of lower image quality.
|
||||
negative_prompt (`str` or `List[str]`, *optional*):
|
||||
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
||||
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
||||
less than `1`).
|
||||
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
||||
The number of images to generate per prompt.
|
||||
eta (`float`, *optional*, defaults to 0.0):
|
||||
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
||||
[`schedulers.DDIMScheduler`], will be ignored for others.
|
||||
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
||||
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
||||
to make generation deterministic.
|
||||
latents (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
||||
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
||||
tensor will be generated by sampling using the supplied random `generator`.
|
||||
prompt_embeds (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
||||
provided, text embeddings will be generated from `prompt` input argument.
|
||||
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
||||
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
||||
argument.
|
||||
output_type (`str`, *optional*, defaults to `"pil"`):
|
||||
The output format of the generate image. Choose between
|
||||
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
||||
return_dict (`bool`, *optional*, defaults to `True`):
|
||||
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
||||
plain tuple.
|
||||
callback (`Callable`, *optional*):
|
||||
A function that will be called every `callback_steps` steps during inference. The function will be
|
||||
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
||||
callback_steps (`int`, *optional*, defaults to 1):
|
||||
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
||||
called at every step.
|
||||
cross_attention_kwargs (`dict`, *optional*):
|
||||
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
||||
`self.processor` in
|
||||
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
|
||||
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
|
||||
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
|
||||
to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
|
||||
corresponding scale as a list.
|
||||
guess_mode (`bool`, *optional*, defaults to `False`):
|
||||
In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
|
||||
attention_auto_machine_weight (`float`):
|
||||
Weight of using reference query for self attention's context.
|
||||
If attention_auto_machine_weight=1.0, use reference query for all self attention's context.
|
||||
gn_auto_machine_weight (`float`):
|
||||
Weight of using reference adain. If gn_auto_machine_weight=2.0, use all reference adain plugins.
|
||||
style_fidelity (`float`):
|
||||
Style fidelity of `ref_uncond_xt`. If style_fidelity=1.0, the control (reference) is more important;
if style_fidelity=0.0, the prompt is more important; otherwise the two are balanced.
|
||||
reference_attn (`bool`):
|
||||
Whether to use reference query for self attention's context.
|
||||
reference_adain (`bool`):
|
||||
Whether to use reference adain.
|
||||
|
||||
Examples:
|
||||
|
||||
Returns:
|
||||
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
||||
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
||||
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
||||
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
||||
(nsfw) content, according to the `safety_checker`.
|
||||
"""
|
||||
# 0. Default height and width to unet
|
||||
height, width = self._default_height_width(height, width, image)
|
||||
|
||||
# 1. Check inputs. Raise error if not correct
|
||||
self.check_inputs(
|
||||
prompt,
|
||||
image,
|
||||
height,
|
||||
width,
|
||||
callback_steps,
|
||||
negative_prompt,
|
||||
prompt_embeds,
|
||||
negative_prompt_embeds,
|
||||
controlnet_conditioning_scale,
|
||||
)
|
||||
|
||||
# 2. Define call parameters
|
||||
if prompt is not None and isinstance(prompt, str):
|
||||
batch_size = 1
|
||||
elif prompt is not None and isinstance(prompt, list):
|
||||
batch_size = len(prompt)
|
||||
else:
|
||||
batch_size = prompt_embeds.shape[0]
|
||||
|
||||
device = self._execution_device
|
||||
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
||||
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
||||
# corresponds to doing no classifier free guidance.
|
||||
do_classifier_free_guidance = guidance_scale > 1.0
|
||||
|
||||
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
||||
|
||||
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
||||
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
|
||||
|
||||
global_pool_conditions = (
|
||||
controlnet.config.global_pool_conditions
|
||||
if isinstance(controlnet, ControlNetModel)
|
||||
else controlnet.nets[0].config.global_pool_conditions
|
||||
)
|
||||
guess_mode = guess_mode or global_pool_conditions
|
||||
|
||||
# 3. Encode input prompt
|
||||
prompt_embeds = self._encode_prompt(
|
||||
prompt,
|
||||
device,
|
||||
num_images_per_prompt,
|
||||
do_classifier_free_guidance,
|
||||
negative_prompt,
|
||||
prompt_embeds=prompt_embeds,
|
||||
negative_prompt_embeds=negative_prompt_embeds,
|
||||
)
|
||||
|
||||
# 4. Prepare image
|
||||
if isinstance(controlnet, ControlNetModel):
|
||||
image = self.prepare_image(
|
||||
image=image,
|
||||
width=width,
|
||||
height=height,
|
||||
batch_size=batch_size * num_images_per_prompt,
|
||||
num_images_per_prompt=num_images_per_prompt,
|
||||
device=device,
|
||||
dtype=controlnet.dtype,
|
||||
do_classifier_free_guidance=do_classifier_free_guidance,
|
||||
guess_mode=guess_mode,
|
||||
)
|
||||
elif isinstance(controlnet, MultiControlNetModel):
|
||||
images = []
|
||||
|
||||
for image_ in image:
|
||||
image_ = self.prepare_image(
|
||||
image=image_,
|
||||
width=width,
|
||||
height=height,
|
||||
batch_size=batch_size * num_images_per_prompt,
|
||||
num_images_per_prompt=num_images_per_prompt,
|
||||
device=device,
|
||||
dtype=controlnet.dtype,
|
||||
do_classifier_free_guidance=do_classifier_free_guidance,
|
||||
guess_mode=guess_mode,
|
||||
)
|
||||
|
||||
images.append(image_)
|
||||
|
||||
image = images
|
||||
else:
|
||||
assert False
|
||||
|
||||
# 5. Preprocess reference image
|
||||
ref_image = self.prepare_image(
|
||||
image=ref_image,
|
||||
width=width,
|
||||
height=height,
|
||||
batch_size=batch_size * num_images_per_prompt,
|
||||
num_images_per_prompt=num_images_per_prompt,
|
||||
device=device,
|
||||
dtype=prompt_embeds.dtype,
|
||||
)
|
||||
|
||||
# 6. Prepare timesteps
|
||||
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
||||
timesteps = self.scheduler.timesteps
|
||||
|
||||
# 7. Prepare latent variables
|
||||
num_channels_latents = self.unet.config.in_channels
|
||||
latents = self.prepare_latents(
|
||||
batch_size * num_images_per_prompt,
|
||||
num_channels_latents,
|
||||
height,
|
||||
width,
|
||||
prompt_embeds.dtype,
|
||||
device,
|
||||
generator,
|
||||
latents,
|
||||
)
|
||||
|
||||
# 8. Prepare reference latent variables
|
||||
ref_image_latents = self.prepare_ref_latents(
|
||||
ref_image,
|
||||
batch_size * num_images_per_prompt,
|
||||
prompt_embeds.dtype,
|
||||
device,
|
||||
generator,
|
||||
do_classifier_free_guidance,
|
||||
)
|
||||
|
||||
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
||||
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
||||
|
||||
# 10. Modify self attention and group norm
|
||||
MODE = "write"
|
||||
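# Boolean mask marking the unconditional half of the classifier-free-guidance batch; when style_fidelity > 0, the patched forwards recompute that half without the reference features.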
uc_mask = (
|
||||
torch.Tensor([1] * batch_size * num_images_per_prompt + [0] * batch_size * num_images_per_prompt)
|
||||
.type_as(ref_image_latents)
|
||||
.bool()
|
||||
)
|
||||
|
||||
def hacked_basic_transformer_inner_forward(
|
||||
self,
|
||||
hidden_states: torch.FloatTensor,
|
||||
attention_mask: Optional[torch.FloatTensor] = None,
|
||||
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
||||
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
||||
timestep: Optional[torch.LongTensor] = None,
|
||||
cross_attention_kwargs: Dict[str, Any] = None,
|
||||
class_labels: Optional[torch.LongTensor] = None,
|
||||
):
|
||||
if self.use_ada_layer_norm:
|
||||
norm_hidden_states = self.norm1(hidden_states, timestep)
|
||||
elif self.use_ada_layer_norm_zero:
|
||||
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
|
||||
hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
|
||||
)
|
||||
else:
|
||||
norm_hidden_states = self.norm1(hidden_states)
|
||||
|
||||
# 1. Self-Attention
|
||||
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
|
||||
if self.only_cross_attention:
|
||||
attn_output = self.attn1(
|
||||
norm_hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
|
||||
attention_mask=attention_mask,
|
||||
**cross_attention_kwargs,
|
||||
)
|
||||
else:
|
||||
if MODE == "write":
|
||||
self.bank.append(norm_hidden_states.detach().clone())
|
||||
attn_output = self.attn1(
|
||||
norm_hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
|
||||
attention_mask=attention_mask,
|
||||
**cross_attention_kwargs,
|
||||
)
|
||||
if MODE == "read":
|
||||
if attention_auto_machine_weight > self.attn_weight:
|
||||
attn_output_uc = self.attn1(
|
||||
norm_hidden_states,
|
||||
encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),
|
||||
# attention_mask=attention_mask,
|
||||
**cross_attention_kwargs,
|
||||
)
|
||||
attn_output_c = attn_output_uc.clone()
|
||||
if do_classifier_free_guidance and style_fidelity > 0:
|
||||
attn_output_c[uc_mask] = self.attn1(
|
||||
norm_hidden_states[uc_mask],
|
||||
encoder_hidden_states=norm_hidden_states[uc_mask],
|
||||
**cross_attention_kwargs,
|
||||
)
|
||||
attn_output = style_fidelity * attn_output_c + (1.0 - style_fidelity) * attn_output_uc
|
||||
self.bank.clear()
|
||||
else:
|
||||
attn_output = self.attn1(
|
||||
norm_hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
|
||||
attention_mask=attention_mask,
|
||||
**cross_attention_kwargs,
|
||||
)
|
||||
if self.use_ada_layer_norm_zero:
|
||||
attn_output = gate_msa.unsqueeze(1) * attn_output
|
||||
hidden_states = attn_output + hidden_states
|
||||
|
||||
if self.attn2 is not None:
|
||||
norm_hidden_states = (
|
||||
self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
|
||||
)
|
||||
|
||||
# 2. Cross-Attention
|
||||
attn_output = self.attn2(
|
||||
norm_hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states,
|
||||
attention_mask=encoder_attention_mask,
|
||||
**cross_attention_kwargs,
|
||||
)
|
||||
hidden_states = attn_output + hidden_states
|
||||
|
||||
# 3. Feed-forward
|
||||
norm_hidden_states = self.norm3(hidden_states)
|
||||
|
||||
if self.use_ada_layer_norm_zero:
|
||||
norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
|
||||
|
||||
ff_output = self.ff(norm_hidden_states)
|
||||
|
||||
if self.use_ada_layer_norm_zero:
|
||||
ff_output = gate_mlp.unsqueeze(1) * ff_output
|
||||
|
||||
hidden_states = ff_output + hidden_states
|
||||
|
||||
return hidden_states
|
||||
|
||||
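# The "read" branches below perform an AdaIN-style renormalisation: activations are standardised with their own per-channel mean/std and rescaled with the statistics banked during the reference ("write") pass; style_fidelity then blends in a variant that leaves the unconditional half of the batch unmodified.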
def hacked_mid_forward(self, *args, **kwargs):
|
||||
eps = 1e-6
|
||||
x = self.original_forward(*args, **kwargs)
|
||||
if MODE == "write":
|
||||
if gn_auto_machine_weight >= self.gn_weight:
|
||||
var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
|
||||
self.mean_bank.append(mean)
|
||||
self.var_bank.append(var)
|
||||
if MODE == "read":
|
||||
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
|
||||
var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
|
||||
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
|
||||
mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))
|
||||
var_acc = sum(self.var_bank) / float(len(self.var_bank))
|
||||
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
|
||||
x_uc = (((x - mean) / std) * std_acc) + mean_acc
|
||||
x_c = x_uc.clone()
|
||||
if do_classifier_free_guidance and style_fidelity > 0:
|
||||
x_c[uc_mask] = x[uc_mask]
|
||||
x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc
|
||||
self.mean_bank = []
|
||||
self.var_bank = []
|
||||
return x
|
||||
|
||||
def hack_CrossAttnDownBlock2D_forward(
|
||||
self,
|
||||
hidden_states: torch.FloatTensor,
|
||||
temb: Optional[torch.FloatTensor] = None,
|
||||
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
||||
attention_mask: Optional[torch.FloatTensor] = None,
|
||||
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
||||
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
||||
):
|
||||
eps = 1e-6
|
||||
|
||||
# TODO(Patrick, William) - attention mask is not used
|
||||
output_states = ()
|
||||
|
||||
for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
|
||||
hidden_states = resnet(hidden_states, temb)
|
||||
hidden_states = attn(
|
||||
hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states,
|
||||
cross_attention_kwargs=cross_attention_kwargs,
|
||||
attention_mask=attention_mask,
|
||||
encoder_attention_mask=encoder_attention_mask,
|
||||
return_dict=False,
|
||||
)[0]
|
||||
if MODE == "write":
|
||||
if gn_auto_machine_weight >= self.gn_weight:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
self.mean_bank.append([mean])
|
||||
self.var_bank.append([var])
|
||||
if MODE == "read":
|
||||
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
|
||||
mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
|
||||
var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
|
||||
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
|
||||
hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
|
||||
hidden_states_c = hidden_states_uc.clone()
|
||||
if do_classifier_free_guidance and style_fidelity > 0:
|
||||
hidden_states_c[uc_mask] = hidden_states[uc_mask]
|
||||
hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
|
||||
|
||||
output_states = output_states + (hidden_states,)
|
||||
|
||||
if MODE == "read":
|
||||
self.mean_bank = []
|
||||
self.var_bank = []
|
||||
|
||||
if self.downsamplers is not None:
|
||||
for downsampler in self.downsamplers:
|
||||
hidden_states = downsampler(hidden_states)
|
||||
|
||||
output_states = output_states + (hidden_states,)
|
||||
|
||||
return hidden_states, output_states
|
||||
|
||||
def hacked_DownBlock2D_forward(self, hidden_states, temb=None):
|
||||
eps = 1e-6
|
||||
|
||||
output_states = ()
|
||||
|
||||
for i, resnet in enumerate(self.resnets):
|
||||
hidden_states = resnet(hidden_states, temb)
|
||||
|
||||
if MODE == "write":
|
||||
if gn_auto_machine_weight >= self.gn_weight:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
self.mean_bank.append([mean])
|
||||
self.var_bank.append([var])
|
||||
if MODE == "read":
|
||||
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
|
||||
mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
|
||||
var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
|
||||
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
|
||||
hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
|
||||
hidden_states_c = hidden_states_uc.clone()
|
||||
if do_classifier_free_guidance and style_fidelity > 0:
|
||||
hidden_states_c[uc_mask] = hidden_states[uc_mask]
|
||||
hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
|
||||
|
||||
output_states = output_states + (hidden_states,)
|
||||
|
||||
if MODE == "read":
|
||||
self.mean_bank = []
|
||||
self.var_bank = []
|
||||
|
||||
if self.downsamplers is not None:
|
||||
for downsampler in self.downsamplers:
|
||||
hidden_states = downsampler(hidden_states)
|
||||
|
||||
output_states = output_states + (hidden_states,)
|
||||
|
||||
return hidden_states, output_states
|
||||
|
||||
def hacked_CrossAttnUpBlock2D_forward(
|
||||
self,
|
||||
hidden_states: torch.FloatTensor,
|
||||
res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
|
||||
temb: Optional[torch.FloatTensor] = None,
|
||||
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
||||
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
||||
upsample_size: Optional[int] = None,
|
||||
attention_mask: Optional[torch.FloatTensor] = None,
|
||||
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
||||
):
|
||||
eps = 1e-6
|
||||
# TODO(Patrick, William) - attention mask is not used
|
||||
for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
|
||||
# pop res hidden states
|
||||
res_hidden_states = res_hidden_states_tuple[-1]
|
||||
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
|
||||
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
|
||||
hidden_states = resnet(hidden_states, temb)
|
||||
hidden_states = attn(
|
||||
hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states,
|
||||
cross_attention_kwargs=cross_attention_kwargs,
|
||||
attention_mask=attention_mask,
|
||||
encoder_attention_mask=encoder_attention_mask,
|
||||
return_dict=False,
|
||||
)[0]
|
||||
|
||||
if MODE == "write":
|
||||
if gn_auto_machine_weight >= self.gn_weight:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
self.mean_bank.append([mean])
|
||||
self.var_bank.append([var])
|
||||
if MODE == "read":
|
||||
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
|
||||
mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
|
||||
var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
|
||||
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
|
||||
hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
|
||||
hidden_states_c = hidden_states_uc.clone()
|
||||
if do_classifier_free_guidance and style_fidelity > 0:
|
||||
hidden_states_c[uc_mask] = hidden_states[uc_mask]
|
||||
hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
|
||||
|
||||
if MODE == "read":
|
||||
self.mean_bank = []
|
||||
self.var_bank = []
|
||||
|
||||
if self.upsamplers is not None:
|
||||
for upsampler in self.upsamplers:
|
||||
hidden_states = upsampler(hidden_states, upsample_size)
|
||||
|
||||
return hidden_states
|
||||
|
||||
def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
|
||||
eps = 1e-6
|
||||
for i, resnet in enumerate(self.resnets):
|
||||
# pop res hidden states
|
||||
res_hidden_states = res_hidden_states_tuple[-1]
|
||||
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
|
||||
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
|
||||
hidden_states = resnet(hidden_states, temb)
|
||||
|
||||
if MODE == "write":
|
||||
if gn_auto_machine_weight >= self.gn_weight:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
self.mean_bank.append([mean])
|
||||
self.var_bank.append([var])
|
||||
if MODE == "read":
|
||||
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
|
||||
mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
|
||||
var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
|
||||
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
|
||||
hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
|
||||
hidden_states_c = hidden_states_uc.clone()
|
||||
if do_classifier_free_guidance and style_fidelity > 0:
|
||||
hidden_states_c[uc_mask] = hidden_states[uc_mask]
|
||||
hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
|
||||
|
||||
if MODE == "read":
|
||||
self.mean_bank = []
|
||||
self.var_bank = []
|
||||
|
||||
if self.upsamplers is not None:
|
||||
for upsampler in self.upsamplers:
|
||||
hidden_states = upsampler(hidden_states, upsample_size)
|
||||
|
||||
return hidden_states
|
||||
|
||||
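# Reference-only control works in two phases per denoising step: the patched modules first run in "write" mode on the noised reference latents, caching self-attention hidden states and group-norm statistics, then in "read" mode on the actual latents, where those cached features are mixed back in according to attention_auto_machine_weight, gn_auto_machine_weight and style_fidelity.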
if reference_attn:
|
||||
attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock)]
|
||||
attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])
|
||||
|
||||
for i, module in enumerate(attn_modules):
|
||||
module._original_inner_forward = module.forward
|
||||
module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)
|
||||
module.bank = []
|
||||
module.attn_weight = float(i) / float(len(attn_modules))
|
||||
|
||||
if reference_adain:
|
||||
gn_modules = [self.unet.mid_block]
|
||||
self.unet.mid_block.gn_weight = 0
|
||||
|
||||
down_blocks = self.unet.down_blocks
|
||||
for w, module in enumerate(down_blocks):
|
||||
module.gn_weight = 1.0 - float(w) / float(len(down_blocks))
|
||||
gn_modules.append(module)
|
||||
|
||||
up_blocks = self.unet.up_blocks
|
||||
for w, module in enumerate(up_blocks):
|
||||
module.gn_weight = float(w) / float(len(up_blocks))
|
||||
gn_modules.append(module)
|
||||
|
||||
for i, module in enumerate(gn_modules):
|
||||
if getattr(module, "original_forward", None) is None:
|
||||
module.original_forward = module.forward
|
||||
if i == 0:
|
||||
# mid_block
|
||||
module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)
|
||||
elif isinstance(module, CrossAttnDownBlock2D):
|
||||
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)
|
||||
elif isinstance(module, DownBlock2D):
|
||||
module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)
|
||||
elif isinstance(module, CrossAttnUpBlock2D):
|
||||
module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)
|
||||
elif isinstance(module, UpBlock2D):
|
||||
module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)
|
||||
module.mean_bank = []
|
||||
module.var_bank = []
|
||||
module.gn_weight *= 2
|
||||
|
||||
# 11. Denoising loop
|
||||
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
||||
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
||||
for i, t in enumerate(timesteps):
|
||||
# expand the latents if we are doing classifier free guidance
|
||||
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
||||
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
||||
|
||||
# controlnet(s) inference
|
||||
if guess_mode and do_classifier_free_guidance:
|
||||
# Infer ControlNet only for the conditional batch.
|
||||
controlnet_latent_model_input = latents
|
||||
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
|
||||
else:
|
||||
controlnet_latent_model_input = latent_model_input
|
||||
controlnet_prompt_embeds = prompt_embeds
|
||||
|
||||
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
||||
controlnet_latent_model_input,
|
||||
t,
|
||||
encoder_hidden_states=controlnet_prompt_embeds,
|
||||
controlnet_cond=image,
|
||||
conditioning_scale=controlnet_conditioning_scale,
|
||||
guess_mode=guess_mode,
|
||||
return_dict=False,
|
||||
)
|
||||
|
||||
if guess_mode and do_classifier_free_guidance:
|
||||
# Inferred ControlNet only for the conditional batch.
|
||||
# To apply the output of ControlNet to both the unconditional and conditional batches,
|
||||
# add 0 to the unconditional batch to keep it unchanged.
|
||||
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
|
||||
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
|
||||
|
||||
# ref only part
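# Noise the reference latents to the current timestep and run a "write" UNet pass so the patched blocks can cache the reference features consumed by the "read" pass below.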
|
||||
noise = randn_tensor(
|
||||
ref_image_latents.shape, generator=generator, device=device, dtype=ref_image_latents.dtype
|
||||
)
|
||||
ref_xt = self.scheduler.add_noise(
|
||||
ref_image_latents,
|
||||
noise,
|
||||
t.reshape(
|
||||
1,
|
||||
),
|
||||
)
|
||||
ref_xt = self.scheduler.scale_model_input(ref_xt, t)
|
||||
|
||||
MODE = "write"
|
||||
self.unet(
|
||||
ref_xt,
|
||||
t,
|
||||
encoder_hidden_states=prompt_embeds,
|
||||
cross_attention_kwargs=cross_attention_kwargs,
|
||||
return_dict=False,
|
||||
)
|
||||
|
||||
# predict the noise residual
|
||||
MODE = "read"
|
||||
noise_pred = self.unet(
|
||||
latent_model_input,
|
||||
t,
|
||||
encoder_hidden_states=prompt_embeds,
|
||||
cross_attention_kwargs=cross_attention_kwargs,
|
||||
down_block_additional_residuals=down_block_res_samples,
|
||||
mid_block_additional_residual=mid_block_res_sample,
|
||||
return_dict=False,
|
||||
)[0]
|
||||
|
||||
# perform guidance
|
||||
if do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
||||
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
||||
|
||||
# compute the previous noisy sample x_t -> x_t-1
|
||||
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
||||
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
if callback is not None and i % callback_steps == 0:
|
||||
callback(i, t, latents)
|
||||
|
||||
# If we do sequential model offloading, let's offload unet and controlnet
|
||||
# manually for max memory savings
|
||||
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
||||
self.unet.to("cpu")
|
||||
self.controlnet.to("cpu")
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
if output_type != "latent":
|
||||
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
||||
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
||||
else:
|
||||
image = latents
|
||||
has_nsfw_concept = None
|
||||
|
||||
if has_nsfw_concept is None:
|
||||
do_denormalize = [True] * image.shape[0]
|
||||
else:
|
||||
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
||||
|
||||
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
||||
|
||||
# Offload last model to CPU
|
||||
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
||||
self.final_offload_hook.offload()
|
||||
|
||||
if not return_dict:
|
||||
return (image, has_nsfw_concept)
|
||||
|
||||
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
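The reference-only machinery above rebinds each UNet block's `forward` with `__get__`, stashing `original_forward` so the stock behaviour stays reachable and attaching per-block attributes such as `gn_weight` and the mean/var banks. The snippet below is a minimal, self-contained sketch of that binding trick on a stand-in `Linear` module; the module and the `gn_weight` scaling are illustrative placeholders, not the pipeline's actual blocks.

```py
# Minimal sketch of patching a module's forward via __get__ (illustrative only).
import torch


def hacked_forward(self, x):
    # call the stored original forward, then blend with a per-module weight
    out = self.original_forward(x)
    return self.gn_weight * out


block = torch.nn.Linear(4, 4)
if getattr(block, "original_forward", None) is None:
    block.original_forward = block.forward  # keep the untouched behaviour around
block.gn_weight = 0.5
block.forward = hacked_forward.__get__(block, torch.nn.Module)  # bind so `self` is `block`

print(block(torch.randn(1, 4)).shape)  # torch.Size([1, 4])
```

Because the patched `forward` is an instance attribute, it shadows the class method only for that one module, which is why the pipeline can patch selected down/mid/up blocks while leaving the rest of the UNet untouched.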
|
||||
848
examples/community/stable_diffusion_ipex.py
Normal file
@@ -0,0 +1,848 @@
|
||||
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import inspect
|
||||
from typing import Any, Callable, Dict, List, Optional, Union
|
||||
|
||||
import intel_extension_for_pytorch as ipex
|
||||
import torch
|
||||
from packaging import version
|
||||
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
|
||||
|
||||
from diffusers.configuration_utils import FrozenDict
|
||||
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
||||
from diffusers.pipeline_utils import DiffusionPipeline
|
||||
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
||||
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
||||
from diffusers.schedulers import KarrasDiffusionSchedulers
|
||||
from diffusers.utils import (
|
||||
deprecate,
|
||||
is_accelerate_available,
|
||||
is_accelerate_version,
|
||||
logging,
|
||||
randn_tensor,
|
||||
replace_example_docstring,
|
||||
)
|
||||
|
||||
|
||||
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
||||
|
||||
EXAMPLE_DOC_STRING = """
|
||||
Examples:
|
||||
```py
|
||||
>>> import torch
|
||||
>>> from diffusers import DiffusionPipeline
|
||||
|
||||
>>> pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", custom_pipeline="stable_diffusion_ipex")
|
||||
|
||||
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> num_inference_steps = 50

>>> # For Float32
>>> pipe.prepare_for_ipex(prompt, dtype=torch.float32, height=512, width=512)  # image height/width must match the pipeline inference call
>>> image = pipe(prompt, num_inference_steps=num_inference_steps, height=512, width=512).images[0]  # height/width must match 'prepare_for_ipex()'

>>> # For BFloat16
>>> pipe.prepare_for_ipex(prompt, dtype=torch.bfloat16, height=512, width=512)  # image height/width must match the pipeline inference call
>>> with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
...     image = pipe(prompt, num_inference_steps=num_inference_steps, height=512, width=512).images[0]  # height/width must match 'prepare_for_ipex()'
|
||||
```
|
||||
"""
|
||||
|
||||
|
||||
class StableDiffusionIPEXPipeline(DiffusionPipeline):
|
||||
r"""
|
||||
Pipeline for text-to-image generation using Stable Diffusion on IPEX.
|
||||
|
||||
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
||||
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
||||
|
||||
Args:
|
||||
vae ([`AutoencoderKL`]):
|
||||
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
||||
text_encoder ([`CLIPTextModel`]):
|
||||
Frozen text-encoder. Stable Diffusion uses the text portion of
|
||||
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
||||
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
||||
tokenizer (`CLIPTokenizer`):
|
||||
Tokenizer of class
|
||||
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
||||
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
||||
scheduler ([`SchedulerMixin`]):
|
||||
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
||||
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
||||
safety_checker ([`StableDiffusionSafetyChecker`]):
|
||||
Classification module that estimates whether generated images could be considered offensive or harmful.
|
||||
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
||||
feature_extractor ([`CLIPFeatureExtractor`]):
|
||||
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
||||
"""
|
||||
_optional_components = ["safety_checker", "feature_extractor"]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
vae: AutoencoderKL,
|
||||
text_encoder: CLIPTextModel,
|
||||
tokenizer: CLIPTokenizer,
|
||||
unet: UNet2DConditionModel,
|
||||
scheduler: KarrasDiffusionSchedulers,
|
||||
safety_checker: StableDiffusionSafetyChecker,
|
||||
feature_extractor: CLIPFeatureExtractor,
|
||||
requires_safety_checker: bool = True,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
|
||||
deprecation_message = (
|
||||
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
||||
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
||||
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
||||
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
||||
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
||||
" file"
|
||||
)
|
||||
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
||||
new_config = dict(scheduler.config)
|
||||
new_config["steps_offset"] = 1
|
||||
scheduler._internal_dict = FrozenDict(new_config)
|
||||
|
||||
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
|
||||
deprecation_message = (
|
||||
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
||||
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
||||
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
||||
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
||||
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
||||
)
|
||||
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
|
||||
new_config = dict(scheduler.config)
|
||||
new_config["clip_sample"] = False
|
||||
scheduler._internal_dict = FrozenDict(new_config)
|
||||
|
||||
if safety_checker is None and requires_safety_checker:
|
||||
logger.warning(
|
||||
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
||||
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
||||
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
||||
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
||||
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
||||
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
||||
)
|
||||
|
||||
if safety_checker is not None and feature_extractor is None:
|
||||
raise ValueError(
|
||||
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
||||
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
||||
)
|
||||
|
||||
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
|
||||
version.parse(unet.config._diffusers_version).base_version
|
||||
) < version.parse("0.9.0.dev0")
|
||||
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
||||
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
||||
deprecation_message = (
|
||||
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
||||
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
||||
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
||||
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
|
||||
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
||||
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
||||
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
||||
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
||||
" the `unet/config.json` file"
|
||||
)
|
||||
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
||||
new_config = dict(unet.config)
|
||||
new_config["sample_size"] = 64
|
||||
unet._internal_dict = FrozenDict(new_config)
|
||||
|
||||
self.register_modules(
|
||||
vae=vae,
|
||||
text_encoder=text_encoder,
|
||||
tokenizer=tokenizer,
|
||||
unet=unet,
|
||||
scheduler=scheduler,
|
||||
safety_checker=safety_checker,
|
||||
feature_extractor=feature_extractor,
|
||||
)
|
||||
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
||||
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
||||
|
||||
def get_input_example(self, prompt, height=None, width=None, guidance_scale=7.5, num_images_per_prompt=1):
|
||||
prompt_embeds = None
|
||||
negative_prompt_embeds = None
|
||||
negative_prompt = None
|
||||
callback_steps = 1
|
||||
generator = None
|
||||
latents = None
|
||||
|
||||
# 0. Default height and width to unet
|
||||
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
||||
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
||||
|
||||
# 1. Check inputs. Raise error if not correct
|
||||
self.check_inputs(
|
||||
prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
|
||||
)
|
||||
|
||||
# 2. Define call parameters
|
||||
if prompt is not None and isinstance(prompt, str):
|
||||
batch_size = 1
|
||||
elif prompt is not None and isinstance(prompt, list):
|
||||
batch_size = len(prompt)
|
||||
|
||||
device = "cpu"
|
||||
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
||||
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
||||
# corresponds to doing no classifier free guidance.
|
||||
do_classifier_free_guidance = guidance_scale > 1.0
|
||||
|
||||
# 3. Encode input prompt
|
||||
prompt_embeds = self._encode_prompt(
|
||||
prompt,
|
||||
device,
|
||||
num_images_per_prompt,
|
||||
do_classifier_free_guidance,
|
||||
negative_prompt,
|
||||
prompt_embeds=prompt_embeds,
|
||||
negative_prompt_embeds=negative_prompt_embeds,
|
||||
)
|
||||
|
||||
# 5. Prepare latent variables
|
||||
latents = self.prepare_latents(
|
||||
batch_size * num_images_per_prompt,
|
||||
self.unet.in_channels,
|
||||
height,
|
||||
width,
|
||||
prompt_embeds.dtype,
|
||||
device,
|
||||
generator,
|
||||
latents,
|
||||
)
|
||||
dummy = torch.ones(1, dtype=torch.int32)
|
||||
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
||||
latent_model_input = self.scheduler.scale_model_input(latent_model_input, dummy)
|
||||
|
||||
unet_input_example = (latent_model_input, dummy, prompt_embeds)
|
||||
vae_decoder_input_example = latents
|
||||
|
||||
return unet_input_example, vae_decoder_input_example
|
||||
|
||||
def prepare_for_ipex(self, prompt, dtype=torch.float32, height=None, width=None, guidance_scale=7.5):
|
||||
self.unet = self.unet.to(memory_format=torch.channels_last)
|
||||
self.vae.decoder = self.vae.decoder.to(memory_format=torch.channels_last)
|
||||
self.text_encoder = self.text_encoder.to(memory_format=torch.channels_last)
|
||||
if self.safety_checker is not None:
|
||||
self.safety_checker = self.safety_checker.to(memory_format=torch.channels_last)
|
||||
|
||||
unet_input_example, vae_decoder_input_example = self.get_input_example(prompt, height, width, guidance_scale)
|
||||
|
||||
# optimize with ipex
|
||||
if dtype == torch.bfloat16:
|
||||
self.unet = ipex.optimize(
|
||||
self.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=unet_input_example
|
||||
)
|
||||
self.vae.decoder = ipex.optimize(self.vae.decoder.eval(), dtype=torch.bfloat16, inplace=True)
|
||||
self.text_encoder = ipex.optimize(self.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
|
||||
if self.safety_checker is not None:
|
||||
self.safety_checker = ipex.optimize(self.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
|
||||
elif dtype == torch.float32:
|
||||
self.unet = ipex.optimize(
|
||||
self.unet.eval(),
|
||||
dtype=torch.float32,
|
||||
inplace=True,
|
||||
sample_input=unet_input_example,
|
||||
level="O1",
|
||||
weights_prepack=True,
|
||||
auto_kernel_selection=False,
|
||||
)
|
||||
self.vae.decoder = ipex.optimize(
|
||||
self.vae.decoder.eval(),
|
||||
dtype=torch.float32,
|
||||
inplace=True,
|
||||
level="O1",
|
||||
weights_prepack=True,
|
||||
auto_kernel_selection=False,
|
||||
)
|
||||
self.text_encoder = ipex.optimize(
|
||||
self.text_encoder.eval(),
|
||||
dtype=torch.float32,
|
||||
inplace=True,
|
||||
level="O1",
|
||||
weights_prepack=True,
|
||||
auto_kernel_selection=False,
|
||||
)
|
||||
if self.safety_checker is not None:
|
||||
self.safety_checker = ipex.optimize(
|
||||
self.safety_checker.eval(),
|
||||
dtype=torch.float32,
|
||||
inplace=True,
|
||||
level="O1",
|
||||
weights_prepack=True,
|
||||
auto_kernel_selection=False,
|
||||
)
|
||||
else:
|
||||
raise ValueError(" The value of 'dtype' should be 'torch.bfloat16' or 'torch.float32' !")
|
||||
|
||||
# trace unet model to get better performance on IPEX
|
||||
with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad():
|
||||
unet_trace_model = torch.jit.trace(self.unet, unet_input_example, check_trace=False, strict=False)
|
||||
unet_trace_model = torch.jit.freeze(unet_trace_model)
|
||||
self.unet.forward = unet_trace_model.forward
|
||||
|
||||
# trace vae.decoder model to get better performance on IPEX
|
||||
with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad():
|
||||
vae_decoder_trace_model = torch.jit.trace(
|
||||
self.vae.decoder, vae_decoder_input_example, check_trace=False, strict=False
|
||||
)
|
||||
vae_decoder_trace_model = torch.jit.freeze(vae_decoder_trace_model)
|
||||
self.vae.decoder.forward = vae_decoder_trace_model.forward
|
||||
|
||||
def enable_vae_slicing(self):
|
||||
r"""
|
||||
Enable sliced VAE decoding.
|
||||
|
||||
When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
|
||||
steps. This is useful to save some memory and allow larger batch sizes.
|
||||
"""
|
||||
self.vae.enable_slicing()
|
||||
|
||||
def disable_vae_slicing(self):
|
||||
r"""
|
||||
Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
|
||||
computing decoding in one step.
|
||||
"""
|
||||
self.vae.disable_slicing()
|
||||
|
||||
def enable_vae_tiling(self):
|
||||
r"""
|
||||
Enable tiled VAE decoding.
|
||||
|
||||
When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
|
||||
several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
|
||||
"""
|
||||
self.vae.enable_tiling()
|
||||
|
||||
def disable_vae_tiling(self):
|
||||
r"""
|
||||
Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
|
||||
computing decoding in one step.
|
||||
"""
|
||||
self.vae.disable_tiling()
|
||||
|
||||
def enable_sequential_cpu_offload(self, gpu_id=0):
|
||||
r"""
|
||||
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
|
||||
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
|
||||
`torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
|
||||
Note that offloading happens on a submodule basis. Memory savings are higher than with
|
||||
`enable_model_cpu_offload`, but performance is lower.
|
||||
"""
|
||||
if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
|
||||
from accelerate import cpu_offload
|
||||
else:
|
||||
raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
|
||||
|
||||
device = torch.device(f"cuda:{gpu_id}")
|
||||
|
||||
if self.device.type != "cpu":
|
||||
self.to("cpu", silence_dtype_warnings=True)
|
||||
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
|
||||
|
||||
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
|
||||
cpu_offload(cpu_offloaded_model, device)
|
||||
|
||||
if self.safety_checker is not None:
|
||||
cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
|
||||
|
||||
def enable_model_cpu_offload(self, gpu_id=0):
|
||||
r"""
|
||||
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
|
||||
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
|
||||
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
|
||||
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
|
||||
"""
|
||||
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
|
||||
from accelerate import cpu_offload_with_hook
|
||||
else:
|
||||
raise ImportError("`enable_model_offload` requires `accelerate v0.17.0` or higher.")
|
||||
|
||||
device = torch.device(f"cuda:{gpu_id}")
|
||||
|
||||
if self.device.type != "cpu":
|
||||
self.to("cpu", silence_dtype_warnings=True)
|
||||
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
|
||||
|
||||
hook = None
|
||||
for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
|
||||
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
|
||||
|
||||
if self.safety_checker is not None:
|
||||
_, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
|
||||
|
||||
# We'll offload the last model manually.
|
||||
self.final_offload_hook = hook
|
||||
|
||||
@property
|
||||
def _execution_device(self):
|
||||
r"""
|
||||
Returns the device on which the pipeline's models will be executed. After calling
|
||||
`pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
|
||||
hooks.
|
||||
"""
|
||||
if not hasattr(self.unet, "_hf_hook"):
|
||||
return self.device
|
||||
for module in self.unet.modules():
|
||||
if (
|
||||
hasattr(module, "_hf_hook")
|
||||
and hasattr(module._hf_hook, "execution_device")
|
||||
and module._hf_hook.execution_device is not None
|
||||
):
|
||||
return torch.device(module._hf_hook.execution_device)
|
||||
return self.device
|
||||
|
||||
def _encode_prompt(
|
||||
self,
|
||||
prompt,
|
||||
device,
|
||||
num_images_per_prompt,
|
||||
do_classifier_free_guidance,
|
||||
negative_prompt=None,
|
||||
prompt_embeds: Optional[torch.FloatTensor] = None,
|
||||
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
||||
):
|
||||
r"""
|
||||
Encodes the prompt into text encoder hidden states.
|
||||
|
||||
Args:
|
||||
prompt (`str` or `List[str]`, *optional*):
|
||||
prompt to be encoded
|
||||
device: (`torch.device`):
|
||||
torch device
|
||||
num_images_per_prompt (`int`):
|
||||
number of images that should be generated per prompt
|
||||
do_classifier_free_guidance (`bool`):
|
||||
whether to use classifier free guidance or not
|
||||
negative_prompt (`str` or `List[str]`, *optional*):
|
||||
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
||||
`negative_prompt_embeds` instead.
|
||||
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
|
||||
prompt_embeds (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
||||
provided, text embeddings will be generated from `prompt` input argument.
|
||||
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
||||
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
||||
argument.
|
||||
"""
|
||||
if prompt is not None and isinstance(prompt, str):
|
||||
batch_size = 1
|
||||
elif prompt is not None and isinstance(prompt, list):
|
||||
batch_size = len(prompt)
|
||||
else:
|
||||
batch_size = prompt_embeds.shape[0]
|
||||
|
||||
if prompt_embeds is None:
|
||||
text_inputs = self.tokenizer(
|
||||
prompt,
|
||||
padding="max_length",
|
||||
max_length=self.tokenizer.model_max_length,
|
||||
truncation=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
text_input_ids = text_inputs.input_ids
|
||||
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
||||
|
||||
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
||||
text_input_ids, untruncated_ids
|
||||
):
|
||||
removed_text = self.tokenizer.batch_decode(
|
||||
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
||||
)
|
||||
logger.warning(
|
||||
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
||||
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
||||
)
|
||||
|
||||
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
||||
attention_mask = text_inputs.attention_mask.to(device)
|
||||
else:
|
||||
attention_mask = None
|
||||
|
||||
prompt_embeds = self.text_encoder(
|
||||
text_input_ids.to(device),
|
||||
attention_mask=attention_mask,
|
||||
)
|
||||
prompt_embeds = prompt_embeds[0]
|
||||
|
||||
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
|
||||
|
||||
bs_embed, seq_len, _ = prompt_embeds.shape
|
||||
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
||||
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
||||
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
||||
|
||||
# get unconditional embeddings for classifier free guidance
|
||||
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
||||
uncond_tokens: List[str]
|
||||
if negative_prompt is None:
|
||||
uncond_tokens = [""] * batch_size
|
||||
elif type(prompt) is not type(negative_prompt):
|
||||
raise TypeError(
|
||||
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
||||
f" {type(prompt)}."
|
||||
)
|
||||
elif isinstance(negative_prompt, str):
|
||||
uncond_tokens = [negative_prompt]
|
||||
elif batch_size != len(negative_prompt):
|
||||
raise ValueError(
|
||||
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
||||
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
||||
" the batch size of `prompt`."
|
||||
)
|
||||
else:
|
||||
uncond_tokens = negative_prompt
|
||||
|
||||
max_length = prompt_embeds.shape[1]
|
||||
uncond_input = self.tokenizer(
|
||||
uncond_tokens,
|
||||
padding="max_length",
|
||||
max_length=max_length,
|
||||
truncation=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
||||
attention_mask = uncond_input.attention_mask.to(device)
|
||||
else:
|
||||
attention_mask = None
|
||||
|
||||
negative_prompt_embeds = self.text_encoder(
|
||||
uncond_input.input_ids.to(device),
|
||||
attention_mask=attention_mask,
|
||||
)
|
||||
negative_prompt_embeds = negative_prompt_embeds[0]
|
||||
|
||||
if do_classifier_free_guidance:
|
||||
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
||||
seq_len = negative_prompt_embeds.shape[1]
|
||||
|
||||
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
|
||||
|
||||
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
||||
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
||||
|
||||
# For classifier free guidance, we need to do two forward passes.
|
||||
# Here we concatenate the unconditional and text embeddings into a single batch
|
||||
# to avoid doing two forward passes
|
||||
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
||||
|
||||
return prompt_embeds
|
||||
|
||||
def run_safety_checker(self, image, device, dtype):
|
||||
if self.safety_checker is not None:
|
||||
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
|
||||
image, has_nsfw_concept = self.safety_checker(
|
||||
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
||||
)
|
||||
else:
|
||||
has_nsfw_concept = None
|
||||
return image, has_nsfw_concept
|
||||
|
||||
def decode_latents(self, latents):
|
||||
latents = 1 / self.vae.config.scaling_factor * latents
|
||||
image = self.vae.decode(latents).sample
|
||||
image = (image / 2 + 0.5).clamp(0, 1)
|
||||
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
||||
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
||||
return image
|
||||
|
||||
def prepare_extra_step_kwargs(self, generator, eta):
|
||||
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
||||
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
||||
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
||||
# and should be between [0, 1]
|
||||
|
||||
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
||||
extra_step_kwargs = {}
|
||||
if accepts_eta:
|
||||
extra_step_kwargs["eta"] = eta
|
||||
|
||||
# check if the scheduler accepts generator
|
||||
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
||||
if accepts_generator:
|
||||
extra_step_kwargs["generator"] = generator
|
||||
return extra_step_kwargs
|
||||
|
||||
def check_inputs(
|
||||
self,
|
||||
prompt,
|
||||
height,
|
||||
width,
|
||||
callback_steps,
|
||||
negative_prompt=None,
|
||||
prompt_embeds=None,
|
||||
negative_prompt_embeds=None,
|
||||
):
|
||||
if height % 8 != 0 or width % 8 != 0:
|
||||
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
||||
|
||||
if (callback_steps is None) or (
|
||||
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
||||
):
|
||||
raise ValueError(
|
||||
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
||||
f" {type(callback_steps)}."
|
||||
)
|
||||
|
||||
if prompt is not None and prompt_embeds is not None:
|
||||
raise ValueError(
|
||||
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
||||
" only forward one of the two."
|
||||
)
|
||||
elif prompt is None and prompt_embeds is None:
|
||||
raise ValueError(
|
||||
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
||||
)
|
||||
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
||||
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
||||
|
||||
if negative_prompt is not None and negative_prompt_embeds is not None:
|
||||
raise ValueError(
|
||||
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
||||
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
||||
)
|
||||
|
||||
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
||||
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
||||
raise ValueError(
|
||||
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
||||
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
||||
f" {negative_prompt_embeds.shape}."
|
||||
)
|
||||
|
||||
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
||||
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
||||
if isinstance(generator, list) and len(generator) != batch_size:
|
||||
raise ValueError(
|
||||
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
||||
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
||||
)
|
||||
|
||||
if latents is None:
|
||||
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
||||
else:
|
||||
latents = latents.to(device)
|
||||
|
||||
# scale the initial noise by the standard deviation required by the scheduler
|
||||
latents = latents * self.scheduler.init_noise_sigma
|
||||
return latents
|
||||
|
||||
@torch.no_grad()
|
||||
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
||||
def __call__(
|
||||
self,
|
||||
prompt: Union[str, List[str]] = None,
|
||||
height: Optional[int] = None,
|
||||
width: Optional[int] = None,
|
||||
num_inference_steps: int = 50,
|
||||
guidance_scale: float = 7.5,
|
||||
negative_prompt: Optional[Union[str, List[str]]] = None,
|
||||
num_images_per_prompt: Optional[int] = 1,
|
||||
eta: float = 0.0,
|
||||
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
||||
latents: Optional[torch.FloatTensor] = None,
|
||||
prompt_embeds: Optional[torch.FloatTensor] = None,
|
||||
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
||||
output_type: Optional[str] = "pil",
|
||||
return_dict: bool = True,
|
||||
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
||||
callback_steps: int = 1,
|
||||
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
||||
):
|
||||
r"""
|
||||
Function invoked when calling the pipeline for generation.
|
||||
|
||||
Args:
|
||||
prompt (`str` or `List[str]`, *optional*):
|
||||
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
|
||||
instead.
|
||||
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
||||
The height in pixels of the generated image.
|
||||
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
||||
The width in pixels of the generated image.
|
||||
num_inference_steps (`int`, *optional*, defaults to 50):
|
||||
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
||||
expense of slower inference.
|
||||
guidance_scale (`float`, *optional*, defaults to 7.5):
|
||||
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
||||
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
||||
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
||||
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
||||
usually at the expense of lower image quality.
|
||||
negative_prompt (`str` or `List[str]`, *optional*):
|
||||
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
||||
`negative_prompt_embeds` instead.
|
||||
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
|
||||
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
||||
The number of images to generate per prompt.
|
||||
eta (`float`, *optional*, defaults to 0.0):
|
||||
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
||||
[`schedulers.DDIMScheduler`], will be ignored for others.
|
||||
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
||||
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
||||
to make generation deterministic.
|
||||
latents (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
||||
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
||||
tensor will be generated by sampling using the supplied random `generator`.
|
||||
prompt_embeds (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
||||
provided, text embeddings will be generated from `prompt` input argument.
|
||||
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
||||
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
||||
argument.
|
||||
output_type (`str`, *optional*, defaults to `"pil"`):
|
||||
The output format of the generated image. Choose between
|
||||
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
||||
return_dict (`bool`, *optional*, defaults to `True`):
|
||||
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
||||
plain tuple.
|
||||
callback (`Callable`, *optional*):
|
||||
A function that will be called every `callback_steps` steps during inference. The function will be
|
||||
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
||||
callback_steps (`int`, *optional*, defaults to 1):
|
||||
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
||||
called at every step.
|
||||
cross_attention_kwargs (`dict`, *optional*):
|
||||
A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
|
||||
`self.processor` in
|
||||
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
|
||||
|
||||
Examples:
|
||||
|
||||
Returns:
|
||||
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
||||
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
||||
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
||||
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
||||
(nsfw) content, according to the `safety_checker`.
|
||||
"""
|
||||
# 0. Default height and width to unet
|
||||
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
||||
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
||||
|
||||
# 1. Check inputs. Raise error if not correct
|
||||
self.check_inputs(
|
||||
prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
|
||||
)
|
||||
|
||||
# 2. Define call parameters
|
||||
if prompt is not None and isinstance(prompt, str):
|
||||
batch_size = 1
|
||||
elif prompt is not None and isinstance(prompt, list):
|
||||
batch_size = len(prompt)
|
||||
else:
|
||||
batch_size = prompt_embeds.shape[0]
|
||||
|
||||
device = self._execution_device
|
||||
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
||||
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
||||
# corresponds to doing no classifier free guidance.
|
||||
do_classifier_free_guidance = guidance_scale > 1.0
|
||||
|
||||
# 3. Encode input prompt
|
||||
prompt_embeds = self._encode_prompt(
|
||||
prompt,
|
||||
device,
|
||||
num_images_per_prompt,
|
||||
do_classifier_free_guidance,
|
||||
negative_prompt,
|
||||
prompt_embeds=prompt_embeds,
|
||||
negative_prompt_embeds=negative_prompt_embeds,
|
||||
)
|
||||
|
||||
# 4. Prepare timesteps
|
||||
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
||||
timesteps = self.scheduler.timesteps
|
||||
|
||||
# 5. Prepare latent variables
|
||||
num_channels_latents = self.unet.in_channels
|
||||
latents = self.prepare_latents(
|
||||
batch_size * num_images_per_prompt,
|
||||
num_channels_latents,
|
||||
height,
|
||||
width,
|
||||
prompt_embeds.dtype,
|
||||
device,
|
||||
generator,
|
||||
latents,
|
||||
)
|
||||
|
||||
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
||||
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
||||
|
||||
# 7. Denoising loop
|
||||
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
||||
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
||||
for i, t in enumerate(timesteps):
|
||||
# expand the latents if we are doing classifier free guidance
|
||||
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
||||
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
||||
|
||||
# predict the noise residual
|
||||
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds)["sample"]
|
||||
|
||||
# perform guidance
|
||||
if do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
||||
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
||||
|
||||
# compute the previous noisy sample x_t -> x_t-1
|
||||
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
||||
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
if callback is not None and i % callback_steps == 0:
|
||||
callback(i, t, latents)
|
||||
|
||||
if output_type == "latent":
|
||||
image = latents
|
||||
has_nsfw_concept = None
|
||||
elif output_type == "pil":
|
||||
# 8. Post-processing
|
||||
image = self.decode_latents(latents)
|
||||
|
||||
# 9. Run safety checker
|
||||
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
||||
|
||||
# 10. Convert to PIL
|
||||
image = self.numpy_to_pil(image)
|
||||
else:
|
||||
# 8. Post-processing
|
||||
image = self.decode_latents(latents)
|
||||
|
||||
# 9. Run safety checker
|
||||
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
||||
|
||||
# Offload last model to CPU
|
||||
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
||||
self.final_offload_hook.offload()
|
||||
|
||||
if not return_dict:
|
||||
return (image, has_nsfw_concept)
|
||||
|
||||
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
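The heart of `prepare_for_ipex` above is the same three-step pattern for every sub-model: convert to channels-last, run `ipex.optimize`, then JIT-trace and freeze the module and rebind the frozen graph as its `forward`. Below is a minimal sketch of that pattern on a toy ConvNet; `TinyNet` and the tensor shapes are illustrative stand-ins for the UNet, and the snippet assumes `intel_extension_for_pytorch` is installed.

```py
# Minimal sketch of the channels_last + ipex.optimize + jit.trace/freeze pattern (illustrative).
import torch
import intel_extension_for_pytorch as ipex


class TinyNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, 3, padding=1)

    def forward(self, x):
        return torch.relu(self.conv(x))


model = TinyNet().eval().to(memory_format=torch.channels_last)
example = torch.randn(1, 3, 64, 64).to(memory_format=torch.channels_last)

# operator fusion and weight prepacking (float32 path, as in the pipeline)
model = ipex.optimize(model, dtype=torch.float32, inplace=True, sample_input=(example,))

with torch.no_grad():
    traced = torch.jit.trace(model, example, check_trace=False, strict=False)
    traced = torch.jit.freeze(traced)

model.forward = traced.forward  # subsequent calls run the frozen TorchScript graph
print(model(example).shape)  # torch.Size([1, 8, 64, 64])
```

Rebinding `forward` to the frozen graph keeps the rest of the pipeline code unchanged while every UNet and VAE-decoder call goes through the optimized TorchScript module.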
|
||||
781
examples/community/stable_diffusion_reference.py
Normal file
@@ -0,0 +1,781 @@
|
||||
# Inspired by: https://github.com/Mikubill/sd-webui-controlnet/discussions/1236 and https://github.com/Mikubill/sd-webui-controlnet/discussions/1280
|
||||
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
||||
|
||||
import numpy as np
|
||||
import PIL.Image
|
||||
import torch
|
||||
|
||||
from diffusers import StableDiffusionPipeline
|
||||
from diffusers.models.attention import BasicTransformerBlock
|
||||
from diffusers.models.unet_2d_blocks import CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, UpBlock2D
|
||||
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
||||
from diffusers.utils import PIL_INTERPOLATION, logging, randn_tensor
|
||||
|
||||
|
||||
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
||||
|
||||
EXAMPLE_DOC_STRING = """
|
||||
Examples:
|
||||
```py
|
||||
>>> import torch
|
||||
>>> from diffusers import UniPCMultistepScheduler
|
||||
>>> from diffusers.utils import load_image
|
||||
|
||||
>>> input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")
|
||||
|
||||
>>> pipe = StableDiffusionReferencePipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-v1-5",
|
||||
safety_checker=None,
|
||||
torch_dtype=torch.float16
|
||||
).to('cuda:0')
|
||||
|
||||
>>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
|
||||
|
||||
>>> result_img = pipe(ref_image=input_image,
|
||||
prompt="1girl",
|
||||
num_inference_steps=20,
|
||||
reference_attn=True,
|
||||
reference_adain=True).images[0]
|
||||
|
||||
>>> result_img.show()
|
||||
```
|
||||
"""
|
||||
|
||||
|
||||
def torch_dfs(model: torch.nn.Module):
|
||||
result = [model]
|
||||
for child in model.children():
|
||||
result += torch_dfs(child)
|
||||
return result
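# Usage sketch (illustrative, not part of the original file): `torch_dfs` flattens a
# module tree into a plain list, which can then be filtered by type to collect the
# modules to patch, e.g.:
#     attn_modules = [m for m in torch_dfs(unet) if isinstance(m, BasicTransformerBlock)]
# where `unet` stands for any loaded UNet2DConditionModel.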
|
||||
|
||||
|
||||
class StableDiffusionReferencePipeline(StableDiffusionPipeline):
|
||||
def _default_height_width(self, height, width, image):
|
||||
# NOTE: It is possible that a list of images has different
|
||||
# dimensions for each image, so just checking the first image
|
||||
# is not _exactly_ correct, but it is simple.
|
||||
while isinstance(image, list):
|
||||
image = image[0]
|
||||
|
||||
if height is None:
|
||||
if isinstance(image, PIL.Image.Image):
|
||||
height = image.height
|
||||
elif isinstance(image, torch.Tensor):
|
||||
height = image.shape[2]
|
||||
|
||||
height = (height // 8) * 8 # round down to nearest multiple of 8
|
||||
|
||||
if width is None:
|
||||
if isinstance(image, PIL.Image.Image):
|
||||
width = image.width
|
||||
elif isinstance(image, torch.Tensor):
|
||||
width = image.shape[3]
|
||||
|
||||
width = (width // 8) * 8 # round down to nearest multiple of 8
|
||||
|
||||
return height, width
|
||||
|
||||
def prepare_image(
|
||||
self,
|
||||
image,
|
||||
width,
|
||||
height,
|
||||
batch_size,
|
||||
num_images_per_prompt,
|
||||
device,
|
||||
dtype,
|
||||
do_classifier_free_guidance=False,
|
||||
guess_mode=False,
|
||||
):
|
||||
if not isinstance(image, torch.Tensor):
|
||||
if isinstance(image, PIL.Image.Image):
|
||||
image = [image]
|
||||
|
||||
if isinstance(image[0], PIL.Image.Image):
|
||||
images = []
|
||||
|
||||
for image_ in image:
|
||||
image_ = image_.convert("RGB")
|
||||
image_ = image_.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
|
||||
image_ = np.array(image_)
|
||||
image_ = image_[None, :]
|
||||
images.append(image_)
|
||||
|
||||
image = images
|
||||
|
||||
image = np.concatenate(image, axis=0)
|
||||
image = np.array(image).astype(np.float32) / 255.0
|
||||
image = (image - 0.5) / 0.5
|
||||
image = image.transpose(0, 3, 1, 2)
|
||||
image = torch.from_numpy(image)
|
||||
elif isinstance(image[0], torch.Tensor):
|
||||
image = torch.cat(image, dim=0)
|
||||
|
||||
image_batch_size = image.shape[0]
|
||||
|
||||
if image_batch_size == 1:
|
||||
repeat_by = batch_size
|
||||
else:
|
||||
# image batch size is the same as prompt batch size
|
||||
repeat_by = num_images_per_prompt
|
||||
|
||||
image = image.repeat_interleave(repeat_by, dim=0)
|
||||
|
||||
image = image.to(device=device, dtype=dtype)
|
||||
|
||||
if do_classifier_free_guidance and not guess_mode:
|
||||
image = torch.cat([image] * 2)
|
||||
|
||||
return image
|
||||
|
||||
def prepare_ref_latents(self, refimage, batch_size, dtype, device, generator, do_classifier_free_guidance):
|
||||
refimage = refimage.to(device=device, dtype=dtype)
|
||||
|
||||
# encode the reference image into the latent space so we can concatenate it to the latents
|
||||
if isinstance(generator, list):
|
||||
ref_image_latents = [
|
||||
self.vae.encode(refimage[i : i + 1]).latent_dist.sample(generator=generator[i])
|
||||
for i in range(batch_size)
|
||||
]
|
||||
ref_image_latents = torch.cat(ref_image_latents, dim=0)
|
||||
else:
|
||||
ref_image_latents = self.vae.encode(refimage).latent_dist.sample(generator=generator)
|
||||
ref_image_latents = self.vae.config.scaling_factor * ref_image_latents
|
||||
|
||||
# duplicate ref_image_latents for each generation per prompt, using mps friendly method
|
||||
if ref_image_latents.shape[0] < batch_size:
|
||||
if batch_size % ref_image_latents.shape[0] != 0:
|
||||
raise ValueError(
|
||||
"The passed images and the required batch size don't match. Images are supposed to be duplicated"
|
||||
f" to a total batch size of {batch_size}, but {ref_image_latents.shape[0]} images were passed."
|
||||
" Make sure the number of images that you pass is divisible by the total requested batch size."
|
||||
)
|
||||
ref_image_latents = ref_image_latents.repeat(batch_size // ref_image_latents.shape[0], 1, 1, 1)
|
||||
|
||||
ref_image_latents = torch.cat([ref_image_latents] * 2) if do_classifier_free_guidance else ref_image_latents
|
||||
|
||||
# aligning device to prevent device errors when concatenating it with the latent model input
|
||||
ref_image_latents = ref_image_latents.to(device=device, dtype=dtype)
|
||||
return ref_image_latents
|
||||
|
||||
@torch.no_grad()
|
||||
def __call__(
|
||||
self,
|
||||
prompt: Union[str, List[str]] = None,
|
||||
ref_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
||||
height: Optional[int] = None,
|
||||
width: Optional[int] = None,
|
||||
num_inference_steps: int = 50,
|
||||
guidance_scale: float = 7.5,
|
||||
negative_prompt: Optional[Union[str, List[str]]] = None,
|
||||
num_images_per_prompt: Optional[int] = 1,
|
||||
eta: float = 0.0,
|
||||
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
||||
latents: Optional[torch.FloatTensor] = None,
|
||||
prompt_embeds: Optional[torch.FloatTensor] = None,
|
||||
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
||||
output_type: Optional[str] = "pil",
|
||||
return_dict: bool = True,
|
||||
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
||||
callback_steps: int = 1,
|
||||
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
||||
attention_auto_machine_weight: float = 1.0,
|
||||
gn_auto_machine_weight: float = 1.0,
|
||||
style_fidelity: float = 0.5,
|
||||
reference_attn: bool = True,
|
||||
reference_adain: bool = True,
|
||||
):
|
||||
r"""
|
||||
Function invoked when calling the pipeline for generation.
|
||||
|
||||
Args:
|
||||
prompt (`str` or `List[str]`, *optional*):
|
||||
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
|
||||
instead.
|
||||
ref_image (`torch.FloatTensor`, `PIL.Image.Image`):
|
||||
The Reference Control input condition. Reference Control uses this input condition to generate guidance for the UNet. If
|
||||
the type is specified as `torch.FloatTensor`, it is passed to Reference Control as is. `PIL.Image.Image` can
|
||||
also be accepted as an image.
|
||||
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
||||
The height in pixels of the generated image.
|
||||
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
||||
The width in pixels of the generated image.
|
||||
num_inference_steps (`int`, *optional*, defaults to 50):
|
||||
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
||||
expense of slower inference.
|
||||
guidance_scale (`float`, *optional*, defaults to 7.5):
|
||||
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
||||
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
||||
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
||||
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
||||
usually at the expense of lower image quality.
|
||||
negative_prompt (`str` or `List[str]`, *optional*):
|
||||
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
||||
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
||||
less than `1`).
|
||||
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
||||
The number of images to generate per prompt.
|
||||
eta (`float`, *optional*, defaults to 0.0):
|
||||
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
||||
[`schedulers.DDIMScheduler`], will be ignored for others.
|
||||
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
||||
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
||||
to make generation deterministic.
|
||||
latents (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
||||
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
||||
tensor will be generated by sampling using the supplied random `generator`.
|
||||
prompt_embeds (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
||||
provided, text embeddings will be generated from `prompt` input argument.
|
||||
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
||||
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
||||
argument.
|
||||
output_type (`str`, *optional*, defaults to `"pil"`):
|
||||
The output format of the generated image. Choose between
|
||||
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
||||
return_dict (`bool`, *optional*, defaults to `True`):
|
||||
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
||||
plain tuple.
|
||||
callback (`Callable`, *optional*):
|
||||
A function that will be called every `callback_steps` steps during inference. The function will be
|
||||
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
||||
callback_steps (`int`, *optional*, defaults to 1):
|
||||
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
||||
called at every step.
|
||||
cross_attention_kwargs (`dict`, *optional*):
|
||||
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
||||
`self.processor` in
|
||||
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
|
||||
attention_auto_machine_weight (`float`):
|
||||
Weight of using reference query for self attention's context.
|
||||
If attention_auto_machine_weight=1.0, use reference query for all self attention's context.
|
||||
gn_auto_machine_weight (`float`):
|
||||
Weight of using reference adain. If gn_auto_machine_weight=2.0, use all reference adain plugins.
|
||||
style_fidelity (`float`):
|
||||
Style fidelity of ref_uncond_xt. If style_fidelity=1.0, the reference (control) dominates;
|
||||
if style_fidelity=0.0, the prompt dominates; intermediate values balance the two.
|
||||
reference_attn (`bool`):
|
||||
Whether to use reference query for self attention's context.
|
||||
reference_adain (`bool`):
|
||||
Whether to use reference adain.
|
||||
|
||||
Examples:
|
||||
|
||||
Returns:
|
||||
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
||||
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
||||
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
||||
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
||||
(nsfw) content, according to the `safety_checker`.
|
||||
"""
|
||||
assert reference_attn or reference_adain, "`reference_attn` or `reference_adain` must be True."
|
||||
|
||||
# 0. Default height and width to unet
|
||||
height, width = self._default_height_width(height, width, ref_image)
|
||||
|
||||
# 1. Check inputs. Raise error if not correct
|
||||
self.check_inputs(
|
||||
prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
|
||||
)
|
||||
|
||||
# 2. Define call parameters
|
||||
if prompt is not None and isinstance(prompt, str):
|
||||
batch_size = 1
|
||||
elif prompt is not None and isinstance(prompt, list):
|
||||
batch_size = len(prompt)
|
||||
else:
|
||||
batch_size = prompt_embeds.shape[0]
|
||||
|
||||
device = self._execution_device
|
||||
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
|
||||
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
||||
# corresponds to doing no classifier free guidance.
|
||||
do_classifier_free_guidance = guidance_scale > 1.0
|
||||
|
||||
# 3. Encode input prompt
|
||||
prompt_embeds = self._encode_prompt(
|
||||
prompt,
|
||||
device,
|
||||
num_images_per_prompt,
|
||||
do_classifier_free_guidance,
|
||||
negative_prompt,
|
||||
prompt_embeds=prompt_embeds,
|
||||
negative_prompt_embeds=negative_prompt_embeds,
|
||||
)
|
||||
|
||||
# 4. Preprocess reference image
|
||||
ref_image = self.prepare_image(
|
||||
image=ref_image,
|
||||
width=width,
|
||||
height=height,
|
||||
batch_size=batch_size * num_images_per_prompt,
|
||||
num_images_per_prompt=num_images_per_prompt,
|
||||
device=device,
|
||||
dtype=prompt_embeds.dtype,
|
||||
)
|
||||
|
||||
# 5. Prepare timesteps
|
||||
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
||||
timesteps = self.scheduler.timesteps
|
||||
|
||||
# 6. Prepare latent variables
|
||||
num_channels_latents = self.unet.config.in_channels
|
||||
latents = self.prepare_latents(
|
||||
batch_size * num_images_per_prompt,
|
||||
num_channels_latents,
|
||||
height,
|
||||
width,
|
||||
prompt_embeds.dtype,
|
||||
device,
|
||||
generator,
|
||||
latents,
|
||||
)
|
||||
|
||||
# 7. Prepare reference latent variables
|
||||
ref_image_latents = self.prepare_ref_latents(
|
||||
ref_image,
|
||||
batch_size * num_images_per_prompt,
|
||||
prompt_embeds.dtype,
|
||||
device,
|
||||
generator,
|
||||
do_classifier_free_guidance,
|
||||
)
|
||||
|
||||
# 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
||||
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
||||
|
||||
# 9. Modify self attention and group norm
|
||||
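# The reference-only mechanism works in two passes per denoising step: in "write" mode the UNet runs on the
# re-noised reference latents and every patched module stores its self-attention hidden states / GroupNorm
# statistics in a bank; in "read" mode the actual denoising pass consumes (and then clears) those banks.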
MODE = "write"
|
||||
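# Under classifier-free guidance `prompt_embeds` is laid out as [negative, positive] along the batch
# dimension, so the first half of each batch is the unconditional half; `uc_mask` marks those entries so
# that `style_fidelity` can keep them free of reference influence in the "read" passes below.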
uc_mask = (
|
||||
torch.Tensor([1] * batch_size * num_images_per_prompt + [0] * batch_size * num_images_per_prompt)
|
||||
.type_as(ref_image_latents)
|
||||
.bool()
|
||||
)
|
||||
|
||||
def hacked_basic_transformer_inner_forward(
|
||||
self,
|
||||
hidden_states: torch.FloatTensor,
|
||||
attention_mask: Optional[torch.FloatTensor] = None,
|
||||
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
||||
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
||||
timestep: Optional[torch.LongTensor] = None,
|
||||
cross_attention_kwargs: Dict[str, Any] = None,
|
||||
class_labels: Optional[torch.LongTensor] = None,
|
||||
):
|
||||
if self.use_ada_layer_norm:
|
||||
norm_hidden_states = self.norm1(hidden_states, timestep)
|
||||
elif self.use_ada_layer_norm_zero:
|
||||
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
|
||||
hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
|
||||
)
|
||||
else:
|
||||
norm_hidden_states = self.norm1(hidden_states)
|
||||
|
||||
# 1. Self-Attention
|
||||
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
|
||||
if self.only_cross_attention:
|
||||
attn_output = self.attn1(
|
||||
norm_hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
|
||||
attention_mask=attention_mask,
|
||||
**cross_attention_kwargs,
|
||||
)
|
||||
else:
|
||||
if MODE == "write":
|
||||
self.bank.append(norm_hidden_states.detach().clone())
|
||||
attn_output = self.attn1(
|
||||
norm_hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
|
||||
attention_mask=attention_mask,
|
||||
**cross_attention_kwargs,
|
||||
)
|
||||
if MODE == "read":
|
||||
if attention_auto_machine_weight > self.attn_weight:
|
||||
attn_output_uc = self.attn1(
|
||||
norm_hidden_states,
|
||||
encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),
|
||||
# attention_mask=attention_mask,
|
||||
**cross_attention_kwargs,
|
||||
)
|
||||
attn_output_c = attn_output_uc.clone()
|
||||
if do_classifier_free_guidance and style_fidelity > 0:
|
||||
attn_output_c[uc_mask] = self.attn1(
|
||||
norm_hidden_states[uc_mask],
|
||||
encoder_hidden_states=norm_hidden_states[uc_mask],
|
||||
**cross_attention_kwargs,
|
||||
)
|
||||
attn_output = style_fidelity * attn_output_c + (1.0 - style_fidelity) * attn_output_uc
|
||||
self.bank.clear()
|
||||
else:
|
||||
attn_output = self.attn1(
|
||||
norm_hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
|
||||
attention_mask=attention_mask,
|
||||
**cross_attention_kwargs,
|
||||
)
|
||||
if self.use_ada_layer_norm_zero:
|
||||
attn_output = gate_msa.unsqueeze(1) * attn_output
|
||||
hidden_states = attn_output + hidden_states
|
||||
|
||||
if self.attn2 is not None:
|
||||
norm_hidden_states = (
|
||||
self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
|
||||
)
|
||||
|
||||
# 2. Cross-Attention
|
||||
attn_output = self.attn2(
|
||||
norm_hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states,
|
||||
attention_mask=encoder_attention_mask,
|
||||
**cross_attention_kwargs,
|
||||
)
|
||||
hidden_states = attn_output + hidden_states
|
||||
|
||||
# 3. Feed-forward
|
||||
norm_hidden_states = self.norm3(hidden_states)
|
||||
|
||||
if self.use_ada_layer_norm_zero:
|
||||
norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
|
||||
|
||||
ff_output = self.ff(norm_hidden_states)
|
||||
|
||||
if self.use_ada_layer_norm_zero:
|
||||
ff_output = gate_mlp.unsqueeze(1) * ff_output
|
||||
|
||||
hidden_states = ff_output + hidden_states
|
||||
|
||||
return hidden_states
|
||||
|
||||
def hacked_mid_forward(self, *args, **kwargs):
|
||||
eps = 1e-6
|
||||
x = self.original_forward(*args, **kwargs)
|
||||
if MODE == "write":
|
||||
if gn_auto_machine_weight >= self.gn_weight:
|
||||
var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
|
||||
self.mean_bank.append(mean)
|
||||
self.var_bank.append(var)
|
||||
if MODE == "read":
|
||||
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
|
||||
var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
|
||||
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
|
||||
mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))
|
||||
var_acc = sum(self.var_bank) / float(len(self.var_bank))
|
||||
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
|
||||
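# AdaIN-style re-normalization: swap the current feature statistics for the accumulated statistics
# recorded during the reference ("write") pass.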
x_uc = (((x - mean) / std) * std_acc) + mean_acc
|
||||
x_c = x_uc.clone()
|
||||
if do_classifier_free_guidance and style_fidelity > 0:
|
||||
x_c[uc_mask] = x[uc_mask]
|
||||
x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc
|
||||
self.mean_bank = []
|
||||
self.var_bank = []
|
||||
return x
|
||||
|
||||
def hack_CrossAttnDownBlock2D_forward(
|
||||
self,
|
||||
hidden_states: torch.FloatTensor,
|
||||
temb: Optional[torch.FloatTensor] = None,
|
||||
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
||||
attention_mask: Optional[torch.FloatTensor] = None,
|
||||
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
||||
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
||||
):
|
||||
eps = 1e-6
|
||||
|
||||
# TODO(Patrick, William) - attention mask is not used
|
||||
output_states = ()
|
||||
|
||||
for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
|
||||
hidden_states = resnet(hidden_states, temb)
|
||||
hidden_states = attn(
|
||||
hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states,
|
||||
cross_attention_kwargs=cross_attention_kwargs,
|
||||
attention_mask=attention_mask,
|
||||
encoder_attention_mask=encoder_attention_mask,
|
||||
return_dict=False,
|
||||
)[0]
|
||||
if MODE == "write":
|
||||
if gn_auto_machine_weight >= self.gn_weight:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
self.mean_bank.append([mean])
|
||||
self.var_bank.append([var])
|
||||
if MODE == "read":
|
||||
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
|
||||
mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
|
||||
var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
|
||||
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
|
||||
hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
|
||||
hidden_states_c = hidden_states_uc.clone()
|
||||
if do_classifier_free_guidance and style_fidelity > 0:
|
||||
hidden_states_c[uc_mask] = hidden_states[uc_mask]
|
||||
hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
|
||||
|
||||
output_states = output_states + (hidden_states,)
|
||||
|
||||
if MODE == "read":
|
||||
self.mean_bank = []
|
||||
self.var_bank = []
|
||||
|
||||
if self.downsamplers is not None:
|
||||
for downsampler in self.downsamplers:
|
||||
hidden_states = downsampler(hidden_states)
|
||||
|
||||
output_states = output_states + (hidden_states,)
|
||||
|
||||
return hidden_states, output_states
|
||||
|
||||
def hacked_DownBlock2D_forward(self, hidden_states, temb=None):
|
||||
eps = 1e-6
|
||||
|
||||
output_states = ()
|
||||
|
||||
for i, resnet in enumerate(self.resnets):
|
||||
hidden_states = resnet(hidden_states, temb)
|
||||
|
||||
if MODE == "write":
|
||||
if gn_auto_machine_weight >= self.gn_weight:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
self.mean_bank.append([mean])
|
||||
self.var_bank.append([var])
|
||||
if MODE == "read":
|
||||
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
|
||||
mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
|
||||
var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
|
||||
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
|
||||
hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
|
||||
hidden_states_c = hidden_states_uc.clone()
|
||||
if do_classifier_free_guidance and style_fidelity > 0:
|
||||
hidden_states_c[uc_mask] = hidden_states[uc_mask]
|
||||
hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
|
||||
|
||||
output_states = output_states + (hidden_states,)
|
||||
|
||||
if MODE == "read":
|
||||
self.mean_bank = []
|
||||
self.var_bank = []
|
||||
|
||||
if self.downsamplers is not None:
|
||||
for downsampler in self.downsamplers:
|
||||
hidden_states = downsampler(hidden_states)
|
||||
|
||||
output_states = output_states + (hidden_states,)
|
||||
|
||||
return hidden_states, output_states
|
||||
|
||||
def hacked_CrossAttnUpBlock2D_forward(
|
||||
self,
|
||||
hidden_states: torch.FloatTensor,
|
||||
res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
|
||||
temb: Optional[torch.FloatTensor] = None,
|
||||
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
||||
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
||||
upsample_size: Optional[int] = None,
|
||||
attention_mask: Optional[torch.FloatTensor] = None,
|
||||
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
||||
):
|
||||
eps = 1e-6
|
||||
# TODO(Patrick, William) - attention mask is not used
|
||||
for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
|
||||
# pop res hidden states
|
||||
res_hidden_states = res_hidden_states_tuple[-1]
|
||||
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
|
||||
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
|
||||
hidden_states = resnet(hidden_states, temb)
|
||||
hidden_states = attn(
|
||||
hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states,
|
||||
cross_attention_kwargs=cross_attention_kwargs,
|
||||
attention_mask=attention_mask,
|
||||
encoder_attention_mask=encoder_attention_mask,
|
||||
return_dict=False,
|
||||
)[0]
|
||||
|
||||
if MODE == "write":
|
||||
if gn_auto_machine_weight >= self.gn_weight:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
self.mean_bank.append([mean])
|
||||
self.var_bank.append([var])
|
||||
if MODE == "read":
|
||||
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
|
||||
mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
|
||||
var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
|
||||
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
|
||||
hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
|
||||
hidden_states_c = hidden_states_uc.clone()
|
||||
if do_classifier_free_guidance and style_fidelity > 0:
|
||||
hidden_states_c[uc_mask] = hidden_states[uc_mask]
|
||||
hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
|
||||
|
||||
if MODE == "read":
|
||||
self.mean_bank = []
|
||||
self.var_bank = []
|
||||
|
||||
if self.upsamplers is not None:
|
||||
for upsampler in self.upsamplers:
|
||||
hidden_states = upsampler(hidden_states, upsample_size)
|
||||
|
||||
return hidden_states
|
||||
|
||||
def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
|
||||
eps = 1e-6
|
||||
for i, resnet in enumerate(self.resnets):
|
||||
# pop res hidden states
|
||||
res_hidden_states = res_hidden_states_tuple[-1]
|
||||
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
|
||||
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
|
||||
hidden_states = resnet(hidden_states, temb)
|
||||
|
||||
if MODE == "write":
|
||||
if gn_auto_machine_weight >= self.gn_weight:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
self.mean_bank.append([mean])
|
||||
self.var_bank.append([var])
|
||||
if MODE == "read":
|
||||
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
|
||||
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
|
||||
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
|
||||
mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
|
||||
var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
|
||||
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
|
||||
hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
|
||||
hidden_states_c = hidden_states_uc.clone()
|
||||
if do_classifier_free_guidance and style_fidelity > 0:
|
||||
hidden_states_c[uc_mask] = hidden_states[uc_mask]
|
||||
hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
|
||||
|
||||
if MODE == "read":
|
||||
self.mean_bank = []
|
||||
self.var_bank = []
|
||||
|
||||
if self.upsamplers is not None:
|
||||
for upsampler in self.upsamplers:
|
||||
hidden_states = upsampler(hidden_states, upsample_size)
|
||||
|
||||
return hidden_states
|
||||
|
||||
if reference_attn:
|
||||
attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock)]
|
||||
attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])
|
||||
|
||||
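# Blocks are sorted by decreasing channel width, so the deepest (low-resolution) blocks get the smallest
# `attn_weight` values. A block reads from the reference only when
# `attention_auto_machine_weight > attn_weight`, so lowering that argument limits the reference influence
# to the deeper blocks.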
for i, module in enumerate(attn_modules):
|
||||
module._original_inner_forward = module.forward
|
||||
module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)
|
||||
module.bank = []
|
||||
module.attn_weight = float(i) / float(len(attn_modules))
|
||||
|
||||
if reference_adain:
|
||||
gn_modules = [self.unet.mid_block]
|
||||
self.unet.mid_block.gn_weight = 0
|
||||
|
||||
down_blocks = self.unet.down_blocks
|
||||
for w, module in enumerate(down_blocks):
|
||||
module.gn_weight = 1.0 - float(w) / float(len(down_blocks))
|
||||
gn_modules.append(module)
|
||||
|
||||
up_blocks = self.unet.up_blocks
|
||||
for w, module in enumerate(up_blocks):
|
||||
module.gn_weight = float(w) / float(len(up_blocks))
|
||||
gn_modules.append(module)
|
||||
|
||||
for i, module in enumerate(gn_modules):
|
||||
if getattr(module, "original_forward", None) is None:
|
||||
module.original_forward = module.forward
|
||||
if i == 0:
|
||||
# mid_block
|
||||
module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)
|
||||
elif isinstance(module, CrossAttnDownBlock2D):
|
||||
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)
|
||||
elif isinstance(module, DownBlock2D):
|
||||
module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)
|
||||
elif isinstance(module, CrossAttnUpBlock2D):
|
||||
module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)
|
||||
elif isinstance(module, UpBlock2D):
|
||||
module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)
|
||||
module.mean_bank = []
|
||||
module.var_bank = []
|
||||
module.gn_weight *= 2
|
||||
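# `gn_weight` is doubled so the values span [0, 2]; a block applies reference AdaIN only when
# `gn_auto_machine_weight >= gn_weight`, so `gn_auto_machine_weight=2.0` covers every block while smaller
# values progressively exclude the shallower (high-resolution) blocks.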
|
||||
# 10. Denoising loop
|
||||
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
||||
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
||||
for i, t in enumerate(timesteps):
|
||||
# expand the latents if we are doing classifier free guidance
|
||||
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
||||
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
||||
|
||||
# ref only part
|
||||
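# Re-noise the clean reference latents to the current timestep so the "write" pass below sees features at
# the same noise level as the latents being denoised.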
noise = randn_tensor(
|
||||
ref_image_latents.shape, generator=generator, device=device, dtype=ref_image_latents.dtype
|
||||
)
|
||||
ref_xt = self.scheduler.add_noise(
|
||||
ref_image_latents,
|
||||
noise,
|
||||
t.reshape(
|
||||
1,
|
||||
),
|
||||
)
|
||||
ref_xt = self.scheduler.scale_model_input(ref_xt, t)
|
||||
|
||||
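# First UNet pass ("write"): its output is discarded; it only lets the patched modules record reference
# features into their banks. The second pass ("read") then predicts the noise residual while consuming
# those banks.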
MODE = "write"
|
||||
self.unet(
|
||||
ref_xt,
|
||||
t,
|
||||
encoder_hidden_states=prompt_embeds,
|
||||
cross_attention_kwargs=cross_attention_kwargs,
|
||||
return_dict=False,
|
||||
)
|
||||
|
||||
# predict the noise residual
|
||||
MODE = "read"
|
||||
noise_pred = self.unet(
|
||||
latent_model_input,
|
||||
t,
|
||||
encoder_hidden_states=prompt_embeds,
|
||||
cross_attention_kwargs=cross_attention_kwargs,
|
||||
return_dict=False,
|
||||
)[0]
|
||||
|
||||
# perform guidance
|
||||
if do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
||||
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
||||
|
||||
# compute the previous noisy sample x_t -> x_t-1
|
||||
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
||||
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
if callback is not None and i % callback_steps == 0:
|
||||
callback(i, t, latents)
|
||||
|
||||
if not output_type == "latent":
|
||||
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
||||
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
||||
else:
|
||||
image = latents
|
||||
has_nsfw_concept = None
|
||||
|
||||
if has_nsfw_concept is None:
|
||||
do_denormalize = [True] * image.shape[0]
|
||||
else:
|
||||
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
||||
|
||||
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
||||
|
||||
# Offload last model to CPU
|
||||
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
||||
self.final_offload_hook.offload()
|
||||
|
||||
if not return_dict:
|
||||
return (image, has_nsfw_concept)
|
||||
|
||||
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
||||
956
examples/community/stable_diffusion_repaint.py
Normal file
@@ -0,0 +1,956 @@
|
||||
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import inspect
|
||||
from typing import Callable, List, Optional, Union
|
||||
|
||||
import numpy as np
|
||||
import PIL
|
||||
import torch
|
||||
from packaging import version
|
||||
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
||||
|
||||
from diffusers import AutoencoderKL, DiffusionPipeline, UNet2DConditionModel
|
||||
from diffusers.configuration_utils import FrozenDict, deprecate
|
||||
from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin
|
||||
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
||||
from diffusers.pipelines.stable_diffusion.safety_checker import (
|
||||
StableDiffusionSafetyChecker,
|
||||
)
|
||||
from diffusers.schedulers import KarrasDiffusionSchedulers
|
||||
from diffusers.utils import (
|
||||
is_accelerate_available,
|
||||
is_accelerate_version,
|
||||
logging,
|
||||
randn_tensor,
|
||||
)
|
||||
|
||||
|
||||
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
||||
|
||||
|
||||
def prepare_mask_and_masked_image(image, mask):
|
||||
"""
|
||||
Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
|
||||
converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
|
||||
``image`` and ``1`` for the ``mask``.
|
||||
The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
|
||||
binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
|
||||
Args:
|
||||
image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
|
||||
It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
|
||||
``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
|
||||
mask (Union[np.array, PIL.Image, torch.Tensor]): The mask to apply to the image, i.e. regions to inpaint.
|
||||
It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
|
||||
``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
|
||||
Raises:
|
||||
ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
|
||||
should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
|
||||
TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
|
||||
(or the other way around).
|
||||
Returns:
|
||||
tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
|
||||
dimensions: ``batch x channels x height x width``.
|
||||
"""
|
||||
if isinstance(image, torch.Tensor):
|
||||
if not isinstance(mask, torch.Tensor):
|
||||
raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not")
|
||||
|
||||
# Batch single image
|
||||
if image.ndim == 3:
|
||||
assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
|
||||
image = image.unsqueeze(0)
|
||||
|
||||
# Batch and add channel dim for single mask
|
||||
if mask.ndim == 2:
|
||||
mask = mask.unsqueeze(0).unsqueeze(0)
|
||||
|
||||
# Batch single mask or add channel dim
|
||||
if mask.ndim == 3:
|
||||
# Single batched mask, no channel dim or single mask not batched but channel dim
|
||||
if mask.shape[0] == 1:
|
||||
mask = mask.unsqueeze(0)
|
||||
|
||||
# Batched masks no channel dim
|
||||
else:
|
||||
mask = mask.unsqueeze(1)
|
||||
|
||||
assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
|
||||
assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
|
||||
assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
|
||||
|
||||
# Check image is in [-1, 1]
|
||||
if image.min() < -1 or image.max() > 1:
|
||||
raise ValueError("Image should be in [-1, 1] range")
|
||||
|
||||
# Check mask is in [0, 1]
|
||||
if mask.min() < 0 or mask.max() > 1:
|
||||
raise ValueError("Mask should be in [0, 1] range")
|
||||
|
||||
# Binarize mask
|
||||
mask[mask < 0.5] = 0
|
||||
mask[mask >= 0.5] = 1
|
||||
|
||||
# Image as float32
|
||||
image = image.to(dtype=torch.float32)
|
||||
elif isinstance(mask, torch.Tensor):
|
||||
raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
|
||||
else:
|
||||
# preprocess image
|
||||
if isinstance(image, (PIL.Image.Image, np.ndarray)):
|
||||
image = [image]
|
||||
|
||||
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
|
||||
image = [np.array(i.convert("RGB"))[None, :] for i in image]
|
||||
image = np.concatenate(image, axis=0)
|
||||
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
|
||||
image = np.concatenate([i[None, :] for i in image], axis=0)
|
||||
|
||||
image = image.transpose(0, 3, 1, 2)
|
||||
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
|
||||
|
||||
# preprocess mask
|
||||
if isinstance(mask, (PIL.Image.Image, np.ndarray)):
|
||||
mask = [mask]
|
||||
|
||||
if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
|
||||
mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
|
||||
mask = mask.astype(np.float32) / 255.0
|
||||
elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
|
||||
mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
|
||||
|
||||
mask[mask < 0.5] = 0
|
||||
mask[mask >= 0.5] = 1
|
||||
mask = torch.from_numpy(mask)
|
||||
|
||||
# masked_image = image * (mask >= 0.5)
|
||||
masked_image = image
|
||||
|
||||
return mask, masked_image
|
||||
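# Illustrative usage sketch (not part of the pipeline API; the file paths below are hypothetical):
#
#     from PIL import Image
#     init_image = Image.open("photo.png").convert("RGB").resize((512, 512))
#     mask_image = Image.open("mask.png").convert("L").resize((512, 512))
#     mask, masked_image = prepare_mask_and_masked_image(init_image, mask_image)
#     # mask: (1, 1, 512, 512) float tensor with values in {0, 1}
#     # masked_image: (1, 3, 512, 512) float tensor in [-1, 1]; for RePaint the full image is returned
#     # (masking happens during sampling), hence the commented-out multiplication above.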
|
||||
|
||||
class StableDiffusionRepaintPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
|
||||
r"""
|
||||
Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*.
|
||||
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
||||
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
||||
In addition the pipeline inherits the following loading methods:
|
||||
- *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
|
||||
- *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
|
||||
as well as the following saving methods:
|
||||
- *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
|
||||
Args:
|
||||
vae ([`AutoencoderKL`]):
|
||||
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
||||
text_encoder ([`CLIPTextModel`]):
|
||||
Frozen text-encoder. Stable Diffusion uses the text portion of
|
||||
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
||||
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
||||
tokenizer (`CLIPTokenizer`):
|
||||
Tokenizer of class
|
||||
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
||||
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
||||
scheduler ([`SchedulerMixin`]):
|
||||
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
||||
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
||||
safety_checker ([`StableDiffusionSafetyChecker`]):
|
||||
Classification module that estimates whether generated images could be considered offensive or harmful.
|
||||
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
||||
feature_extractor ([`CLIPImageProcessor`]):
|
||||
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
||||
"""
|
||||
_optional_components = ["safety_checker", "feature_extractor"]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
vae: AutoencoderKL,
|
||||
text_encoder: CLIPTextModel,
|
||||
tokenizer: CLIPTokenizer,
|
||||
unet: UNet2DConditionModel,
|
||||
scheduler: KarrasDiffusionSchedulers,
|
||||
safety_checker: StableDiffusionSafetyChecker,
|
||||
feature_extractor: CLIPImageProcessor,
|
||||
requires_safety_checker: bool = True,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
|
||||
deprecation_message = (
|
||||
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
||||
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
||||
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
||||
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
||||
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
||||
" file"
|
||||
)
|
||||
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
||||
new_config = dict(scheduler.config)
|
||||
new_config["steps_offset"] = 1
|
||||
scheduler._internal_dict = FrozenDict(new_config)
|
||||
|
||||
if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
|
||||
deprecation_message = (
|
||||
f"The configuration file of this scheduler: {scheduler} has not set the configuration"
|
||||
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
|
||||
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
|
||||
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
|
||||
" Hub, it would be very nice if you could open a Pull request for the"
|
||||
" `scheduler/scheduler_config.json` file"
|
||||
)
|
||||
deprecate(
|
||||
"skip_prk_steps not set",
|
||||
"1.0.0",
|
||||
deprecation_message,
|
||||
standard_warn=False,
|
||||
)
|
||||
new_config = dict(scheduler.config)
|
||||
new_config["skip_prk_steps"] = True
|
||||
scheduler._internal_dict = FrozenDict(new_config)
|
||||
|
||||
if safety_checker is None and requires_safety_checker:
|
||||
logger.warning(
|
||||
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
||||
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
||||
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
||||
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
||||
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
||||
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
||||
)
|
||||
|
||||
if safety_checker is not None and feature_extractor is None:
|
||||
raise ValueError(
|
||||
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
||||
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
||||
)
|
||||
|
||||
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
|
||||
version.parse(unet.config._diffusers_version).base_version
|
||||
) < version.parse("0.9.0.dev0")
|
||||
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
||||
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
||||
deprecation_message = (
|
||||
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
||||
" 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
|
||||
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
||||
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
|
||||
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
||||
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
||||
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
||||
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
||||
" the `unet/config.json` file"
|
||||
)
|
||||
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
||||
new_config = dict(unet.config)
|
||||
new_config["sample_size"] = 64
|
||||
unet._internal_dict = FrozenDict(new_config)
|
||||
# Check shapes, assume num_channels_latents == 4, num_channels_mask == 1, num_channels_masked == 4
|
||||
if unet.config.in_channels != 4:
|
||||
logger.warning(
|
||||
f"You have loaded a UNet with {unet.config.in_channels} input channels, whereas by default,"
|
||||
f" {self.__class__} assumes that `pipeline.unet` has 4 input channels: 4 for `num_channels_latents`,"
|
||||
". If you did not intend to modify"
|
||||
" this behavior, please check whether you have loaded the right checkpoint."
|
||||
)
|
||||
|
||||
self.register_modules(
|
||||
vae=vae,
|
||||
text_encoder=text_encoder,
|
||||
tokenizer=tokenizer,
|
||||
unet=unet,
|
||||
scheduler=scheduler,
|
||||
safety_checker=safety_checker,
|
||||
feature_extractor=feature_extractor,
|
||||
)
|
||||
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
||||
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
||||
|
||||
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
|
||||
def enable_sequential_cpu_offload(self, gpu_id=0):
|
||||
r"""
|
||||
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
|
||||
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
|
||||
`torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
|
||||
Note that offloading happens on a submodule basis. Memory savings are higher than with
|
||||
`enable_model_cpu_offload`, but performance is lower.
|
||||
"""
|
||||
if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
|
||||
from accelerate import cpu_offload
|
||||
else:
|
||||
raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
|
||||
|
||||
device = torch.device(f"cuda:{gpu_id}")
|
||||
|
||||
if self.device.type != "cpu":
|
||||
self.to("cpu", silence_dtype_warnings=True)
|
||||
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
|
||||
|
||||
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
|
||||
cpu_offload(cpu_offloaded_model, device)
|
||||
|
||||
if self.safety_checker is not None:
|
||||
cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
|
||||
|
||||
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
|
||||
def enable_model_cpu_offload(self, gpu_id=0):
|
||||
r"""
|
||||
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
|
||||
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
|
||||
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
|
||||
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
|
||||
"""
|
||||
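# Illustrative call pattern (assumes `accelerate>=0.17.0` is installed and a CUDA device is available; the
# checkpoint mirrors the one used in the `__call__` docstring example below):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_repaint"
#     )
#     pipe.enable_model_cpu_offload()  # sub-models move to the GPU one at a time during inference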
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
|
||||
from accelerate import cpu_offload_with_hook
|
||||
else:
|
||||
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
|
||||
|
||||
device = torch.device(f"cuda:{gpu_id}")
|
||||
|
||||
if self.device.type != "cpu":
|
||||
self.to("cpu", silence_dtype_warnings=True)
|
||||
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
|
||||
|
||||
hook = None
|
||||
for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
|
||||
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
|
||||
|
||||
if self.safety_checker is not None:
|
||||
_, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
|
||||
|
||||
# We'll offload the last model manually.
|
||||
self.final_offload_hook = hook
|
||||
|
||||
@property
|
||||
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
|
||||
def _execution_device(self):
|
||||
r"""
|
||||
Returns the device on which the pipeline's models will be executed. After calling
|
||||
`pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
|
||||
hooks.
|
||||
"""
|
||||
if not hasattr(self.unet, "_hf_hook"):
|
||||
return self.device
|
||||
for module in self.unet.modules():
|
||||
if (
|
||||
hasattr(module, "_hf_hook")
|
||||
and hasattr(module._hf_hook, "execution_device")
|
||||
and module._hf_hook.execution_device is not None
|
||||
):
|
||||
return torch.device(module._hf_hook.execution_device)
|
||||
return self.device
|
||||
|
||||
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
|
||||
def _encode_prompt(
|
||||
self,
|
||||
prompt,
|
||||
device,
|
||||
num_images_per_prompt,
|
||||
do_classifier_free_guidance,
|
||||
negative_prompt=None,
|
||||
prompt_embeds: Optional[torch.FloatTensor] = None,
|
||||
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
||||
):
|
||||
r"""
|
||||
Encodes the prompt into text encoder hidden states.
|
||||
Args:
|
||||
prompt (`str` or `List[str]`, *optional*):
|
||||
prompt to be encoded
|
||||
device: (`torch.device`):
|
||||
torch device
|
||||
num_images_per_prompt (`int`):
|
||||
number of images that should be generated per prompt
|
||||
do_classifier_free_guidance (`bool`):
|
||||
whether to use classifier free guidance or not
|
||||
negative_prompt (`str` or `List[str]`, *optional*):
|
||||
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
||||
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
||||
less than `1`).
|
||||
prompt_embeds (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
||||
provided, text embeddings will be generated from `prompt` input argument.
|
||||
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
||||
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
||||
argument.
|
||||
"""
|
||||
if prompt is not None and isinstance(prompt, str):
|
||||
batch_size = 1
|
||||
elif prompt is not None and isinstance(prompt, list):
|
||||
batch_size = len(prompt)
|
||||
else:
|
||||
batch_size = prompt_embeds.shape[0]
|
||||
|
||||
if prompt_embeds is None:
|
||||
# textual inversion: process multi-vector tokens if necessary
|
||||
if isinstance(self, TextualInversionLoaderMixin):
|
||||
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
||||
|
||||
text_inputs = self.tokenizer(
|
||||
prompt,
|
||||
padding="max_length",
|
||||
max_length=self.tokenizer.model_max_length,
|
||||
truncation=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
text_input_ids = text_inputs.input_ids
|
||||
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
||||
|
||||
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
||||
text_input_ids, untruncated_ids
|
||||
):
|
||||
removed_text = self.tokenizer.batch_decode(
|
||||
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
||||
)
|
||||
logger.warning(
|
||||
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
||||
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
||||
)
|
||||
|
||||
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
||||
attention_mask = text_inputs.attention_mask.to(device)
|
||||
else:
|
||||
attention_mask = None
|
||||
|
||||
prompt_embeds = self.text_encoder(
|
||||
text_input_ids.to(device),
|
||||
attention_mask=attention_mask,
|
||||
)
|
||||
prompt_embeds = prompt_embeds[0]
|
||||
|
||||
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
|
||||
|
||||
bs_embed, seq_len, _ = prompt_embeds.shape
|
||||
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
||||
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
||||
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
||||
|
||||
# get unconditional embeddings for classifier free guidance
|
||||
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
||||
uncond_tokens: List[str]
|
||||
if negative_prompt is None:
|
||||
uncond_tokens = [""] * batch_size
|
||||
elif type(prompt) is not type(negative_prompt):
|
||||
raise TypeError(
|
||||
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
||||
f" {type(prompt)}."
|
||||
)
|
||||
elif isinstance(negative_prompt, str):
|
||||
uncond_tokens = [negative_prompt]
|
||||
elif batch_size != len(negative_prompt):
|
||||
raise ValueError(
|
||||
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
||||
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
||||
" the batch size of `prompt`."
|
||||
)
|
||||
else:
|
||||
uncond_tokens = negative_prompt
|
||||
|
||||
# textual inversion: process multi-vector tokens if necessary
|
||||
if isinstance(self, TextualInversionLoaderMixin):
|
||||
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
||||
|
||||
max_length = prompt_embeds.shape[1]
|
||||
uncond_input = self.tokenizer(
|
||||
uncond_tokens,
|
||||
padding="max_length",
|
||||
max_length=max_length,
|
||||
truncation=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
||||
attention_mask = uncond_input.attention_mask.to(device)
|
||||
else:
|
||||
attention_mask = None
|
||||
|
||||
negative_prompt_embeds = self.text_encoder(
|
||||
uncond_input.input_ids.to(device),
|
||||
attention_mask=attention_mask,
|
||||
)
|
||||
negative_prompt_embeds = negative_prompt_embeds[0]
|
||||
|
||||
if do_classifier_free_guidance:
|
||||
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
||||
seq_len = negative_prompt_embeds.shape[1]
|
||||
|
||||
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
|
||||
|
||||
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
||||
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
||||
|
||||
# For classifier free guidance, we need to do two forward passes.
|
||||
# Here we concatenate the unconditional and text embeddings into a single batch
|
||||
# to avoid doing two forward passes
|
||||
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
||||
|
||||
return prompt_embeds
|
||||
|
||||
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
|
||||
def run_safety_checker(self, image, device, dtype):
|
||||
if self.safety_checker is not None:
|
||||
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
|
||||
image, has_nsfw_concept = self.safety_checker(
|
||||
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
||||
)
|
||||
else:
|
||||
has_nsfw_concept = None
|
||||
return image, has_nsfw_concept
|
||||
|
||||
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
||||
def prepare_extra_step_kwargs(self, generator, eta):
|
||||
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
||||
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
||||
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
||||
# and should be between [0, 1]
|
||||
|
||||
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
||||
extra_step_kwargs = {}
|
||||
if accepts_eta:
|
||||
extra_step_kwargs["eta"] = eta
|
||||
|
||||
# check if the scheduler accepts generator
|
||||
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
||||
if accepts_generator:
|
||||
extra_step_kwargs["generator"] = generator
|
||||
return extra_step_kwargs
|
||||
|
||||
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
|
||||
def decode_latents(self, latents):
|
||||
latents = 1 / self.vae.config.scaling_factor * latents
|
||||
image = self.vae.decode(latents).sample
|
||||
image = (image / 2 + 0.5).clamp(0, 1)
|
||||
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
||||
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
||||
return image
|
||||
|
||||
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
|
||||
def check_inputs(
|
||||
self,
|
||||
prompt,
|
||||
height,
|
||||
width,
|
||||
callback_steps,
|
||||
negative_prompt=None,
|
||||
prompt_embeds=None,
|
||||
negative_prompt_embeds=None,
|
||||
):
|
||||
if height % 8 != 0 or width % 8 != 0:
|
||||
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
||||
|
||||
if (callback_steps is None) or (
|
||||
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
||||
):
|
||||
raise ValueError(
|
||||
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
||||
f" {type(callback_steps)}."
|
||||
)
|
||||
|
||||
if prompt is not None and prompt_embeds is not None:
|
||||
raise ValueError(
|
||||
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
||||
" only forward one of the two."
|
||||
)
|
||||
elif prompt is None and prompt_embeds is None:
|
||||
raise ValueError(
|
||||
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
||||
)
|
||||
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
||||
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
||||
|
||||
if negative_prompt is not None and negative_prompt_embeds is not None:
|
||||
raise ValueError(
|
||||
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
||||
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
||||
)
|
||||
|
||||
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
||||
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
||||
raise ValueError(
|
||||
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
||||
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
||||
f" {negative_prompt_embeds.shape}."
|
||||
)
|
||||
|
||||
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
||||
def prepare_latents(
|
||||
self,
|
||||
batch_size,
|
||||
num_channels_latents,
|
||||
height,
|
||||
width,
|
||||
dtype,
|
||||
device,
|
||||
generator,
|
||||
latents=None,
|
||||
):
|
||||
shape = (
|
||||
batch_size,
|
||||
num_channels_latents,
|
||||
height // self.vae_scale_factor,
|
||||
width // self.vae_scale_factor,
|
||||
)
|
||||
if isinstance(generator, list) and len(generator) != batch_size:
|
||||
raise ValueError(
|
||||
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
||||
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
||||
)
|
||||
|
||||
if latents is None:
|
||||
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
||||
else:
|
||||
latents = latents.to(device)
|
||||
|
||||
# scale the initial noise by the standard deviation required by the scheduler
|
||||
latents = latents * self.scheduler.init_noise_sigma
|
||||
return latents
|
||||
|
||||
def prepare_mask_latents(
|
||||
self,
|
||||
mask,
|
||||
masked_image,
|
||||
batch_size,
|
||||
height,
|
||||
width,
|
||||
dtype,
|
||||
device,
|
||||
generator,
|
||||
do_classifier_free_guidance,
|
||||
):
|
||||
# resize the mask to latents shape as we concatenate the mask to the latents
|
||||
# we do that before converting to dtype to avoid breaking in case we're using cpu_offload
|
||||
# and half precision
|
||||
mask = torch.nn.functional.interpolate(
|
||||
mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
|
||||
)
|
||||
mask = mask.to(device=device, dtype=dtype)
|
||||
|
||||
masked_image = masked_image.to(device=device, dtype=dtype)
|
||||
|
||||
# encode the mask image into latents space so we can concatenate it to the latents
|
||||
if isinstance(generator, list):
|
||||
masked_image_latents = [
|
||||
self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i])
|
||||
for i in range(batch_size)
|
||||
]
|
||||
masked_image_latents = torch.cat(masked_image_latents, dim=0)
|
||||
else:
|
||||
masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
|
||||
masked_image_latents = self.vae.config.scaling_factor * masked_image_latents
|
||||
|
||||
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
|
||||
if mask.shape[0] < batch_size:
|
||||
if not batch_size % mask.shape[0] == 0:
|
||||
raise ValueError(
|
||||
"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
|
||||
f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
|
||||
" of masks that you pass is divisible by the total requested batch size."
|
||||
)
|
||||
mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
|
||||
if masked_image_latents.shape[0] < batch_size:
|
||||
if not batch_size % masked_image_latents.shape[0] == 0:
|
||||
raise ValueError(
|
||||
"The passed images and the required batch size don't match. Images are supposed to be duplicated"
|
||||
f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
|
||||
" Make sure the number of images that you pass is divisible by the total requested batch size."
|
||||
)
|
||||
masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
|
||||
|
||||
mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
|
||||
masked_image_latents = (
|
||||
torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
|
||||
)
|
||||
|
||||
# aligning device to prevent device errors when concatenating it with the latent model input
|
||||
masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
|
||||
return mask, masked_image_latents
|
||||
|
||||
@torch.no_grad()
|
||||
def __call__(
|
||||
self,
|
||||
prompt: Union[str, List[str]] = None,
|
||||
image: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
||||
mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
||||
height: Optional[int] = None,
|
||||
width: Optional[int] = None,
|
||||
num_inference_steps: int = 50,
|
||||
jump_length: Optional[int] = 10,
|
||||
jump_n_sample: Optional[int] = 10,
|
||||
guidance_scale: float = 7.5,
|
||||
negative_prompt: Optional[Union[str, List[str]]] = None,
|
||||
num_images_per_prompt: Optional[int] = 1,
|
||||
eta: float = 0.0,
|
||||
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
||||
latents: Optional[torch.FloatTensor] = None,
|
||||
prompt_embeds: Optional[torch.FloatTensor] = None,
|
||||
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
||||
output_type: Optional[str] = "pil",
|
||||
return_dict: bool = True,
|
||||
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
||||
callback_steps: int = 1,
|
||||
):
|
||||
r"""
|
||||
Function invoked when calling the pipeline for generation.
|
||||
Args:
|
||||
prompt (`str` or `List[str]`, *optional*):
|
||||
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
|
||||
instead.
|
||||
image (`PIL.Image.Image`):
|
||||
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
|
||||
be masked out with `mask_image` and repainted according to `prompt`.
|
||||
mask_image (`PIL.Image.Image`):
|
||||
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
||||
repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
|
||||
to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
|
||||
instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
||||
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
||||
The height in pixels of the generated image.
|
||||
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
||||
The width in pixels of the generated image.
|
||||
num_inference_steps (`int`, *optional*, defaults to 50):
|
||||
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
||||
expense of slower inference.
|
||||
jump_length (`int`, *optional*, defaults to 10):
|
||||
The number of steps taken forward in time before going backward in time for a single jump ("j" in
|
||||
RePaint paper). Take a look at Figure 9 and 10 in https://arxiv.org/pdf/2201.09865.pdf.
|
||||
jump_n_sample (`int`, *optional*, defaults to 10):
|
||||
The number of times we make a forward time jump for a given chosen time sample. Take a look at
|
||||
Figure 9 and 10 in https://arxiv.org/pdf/2201.09865.pdf.
|
||||
guidance_scale (`float`, *optional*, defaults to 7.5):
|
||||
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
||||
`guidance_scale` is defined as `w` of equation 2 of [Imagen
|
||||
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
||||
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
||||
usually at the expense of lower image quality.
|
||||
negative_prompt (`str` or `List[str]`, *optional*):
|
||||
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
is less than `1`).
|
||||
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
||||
The number of images to generate per prompt.
|
||||
eta (`float`, *optional*, defaults to 0.0):
|
||||
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
||||
[`schedulers.DDIMScheduler`], will be ignored for others.
|
||||
generator (`torch.Generator`, *optional*):
|
||||
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
||||
to make generation deterministic.
|
||||
latents (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
|
||||
prompt_embeds (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
||||
provided, text embeddings will be generated from `prompt` input argument.
|
||||
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
||||
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
||||
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
||||
argument.
|
||||
output_type (`str`, *optional*, defaults to `"pil"`):
|
||||
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
||||
return_dict (`bool`, *optional*, defaults to `True`):
|
||||
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
||||
plain tuple.
|
||||
callback (`Callable`, *optional*):
|
||||
A function that will be called every `callback_steps` steps during inference. The function will be
|
||||
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
||||
callback_steps (`int`, *optional*, defaults to 1):
|
||||
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
||||
called at every step.
|
||||
Examples:
|
||||
```py
|
||||
>>> import PIL
|
||||
>>> import requests
|
||||
>>> import torch
|
||||
>>> from io import BytesIO
|
||||
>>> from diffusers import DiffusionPipeline, RePaintScheduler
|
||||
>>> def download_image(url):
|
||||
... response = requests.get(url)
|
||||
... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
|
||||
>>> base_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/"
|
||||
>>> img_url = base_url + "overture-creations-5sI6fQgYIuo.png"
|
||||
>>> mask_url = base_url + "overture-creations-5sI6fQgYIuo_mask.png"
|
||||
>>> init_image = download_image(img_url).resize((512, 512))
|
||||
>>> mask_image = download_image(mask_url).resize((512, 512))
|
||||
>>> pipe = DiffusionPipeline.from_pretrained(
|
||||
... "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, custom_pipeline="stable_diffusion_repaint",
|
||||
... )
|
||||
>>> pipe.scheduler = RePaintScheduler.from_config(pipe.scheduler.config)
|
||||
>>> pipe = pipe.to("cuda")
|
||||
>>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
|
||||
>>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
|
||||
```
|
||||
Returns:
|
||||
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
||||
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
||||
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
||||
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
||||
(nsfw) content, according to the `safety_checker`.
|
||||
"""
|
||||
# 0. Default height and width to unet
|
||||
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
||||
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
||||
|
||||
# 1. Check inputs
|
||||
self.check_inputs(
|
||||
prompt,
|
||||
height,
|
||||
width,
|
||||
callback_steps,
|
||||
negative_prompt,
|
||||
prompt_embeds,
|
||||
negative_prompt_embeds,
|
||||
)
|
||||
|
||||
if image is None:
|
||||
raise ValueError("`image` input cannot be undefined.")
|
||||
|
||||
if mask_image is None:
|
||||
raise ValueError("`mask_image` input cannot be undefined.")
|
||||
|
||||
# 2. Define call parameters
|
||||
if prompt is not None and isinstance(prompt, str):
|
||||
batch_size = 1
|
||||
elif prompt is not None and isinstance(prompt, list):
|
||||
batch_size = len(prompt)
|
||||
else:
|
||||
batch_size = prompt_embeds.shape[0]
|
||||
|
||||
device = self._execution_device
|
||||
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
|
||||
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
||||
# corresponds to doing no classifier free guidance.
|
||||
do_classifier_free_guidance = guidance_scale > 1.0
|
||||
|
||||
# 3. Encode input prompt
|
||||
prompt_embeds = self._encode_prompt(
|
||||
prompt,
|
||||
device,
|
||||
num_images_per_prompt,
|
||||
do_classifier_free_guidance,
|
||||
negative_prompt,
|
||||
prompt_embeds=prompt_embeds,
|
||||
negative_prompt_embeds=negative_prompt_embeds,
|
||||
)
|
||||
|
||||
# 4. Preprocess mask and image
|
||||
mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
|
||||
|
||||
# 5. set timesteps
|
||||
self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, device)
|
||||
self.scheduler.eta = eta
|
||||
|
||||
timesteps = self.scheduler.timesteps
|
||||
# latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
||||
|
||||
# 6. Prepare latent variables
|
||||
num_channels_latents = self.vae.config.latent_channels
|
||||
latents = self.prepare_latents(
|
||||
batch_size * num_images_per_prompt,
|
||||
num_channels_latents,
|
||||
height,
|
||||
width,
|
||||
prompt_embeds.dtype,
|
||||
device,
|
||||
generator,
|
||||
latents,
|
||||
)
|
||||
|
||||
# 7. Prepare mask latent variables
|
||||
mask, masked_image_latents = self.prepare_mask_latents(
|
||||
mask,
|
||||
masked_image,
|
||||
batch_size * num_images_per_prompt,
|
||||
height,
|
||||
width,
|
||||
prompt_embeds.dtype,
|
||||
device,
|
||||
generator,
|
||||
do_classifier_free_guidance=False, # We do not need duplicate mask and image
|
||||
)
|
||||
|
||||
# 8. Check that sizes of mask, masked image and latents match
|
||||
# num_channels_mask = mask.shape[1]
|
||||
# num_channels_masked_image = masked_image_latents.shape[1]
|
||||
if num_channels_latents != self.unet.config.in_channels:
|
||||
raise ValueError(
|
||||
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
|
||||
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} "
|
||||
f" = Please verify the config of"
|
||||
" `pipeline.unet` or your `mask_image` or `image` input."
|
||||
)
|
||||
|
||||
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
||||
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
||||
|
||||
t_last = timesteps[0] + 1
|
||||
|
||||
# 10. Denoising loop
|
||||
with self.progress_bar(total=len(timesteps)) as progress_bar:
|
||||
for i, t in enumerate(timesteps):
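# RePaint timesteps are non-monotonic: whenever the scheduler jumps forward in time
# (t >= t_last), the latents are re-noised with `undo_step` instead of being denoised.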
|
||||
if t >= t_last:
|
||||
# compute the reverse: x_t-1 -> x_t
|
||||
latents = self.scheduler.undo_step(latents, t_last, generator)
|
||||
progress_bar.update()
|
||||
t_last = t
|
||||
continue
|
||||
|
||||
# expand the latents if we are doing classifier free guidance
|
||||
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
||||
|
||||
# concat latents, mask, masked_image_latents in the channel dimension
|
||||
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
||||
# latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
|
||||
|
||||
# predict the noise residual
|
||||
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
|
||||
|
||||
# perform guidance
|
||||
if do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
||||
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
||||
|
||||
# compute the previous noisy sample x_t -> x_t-1
|
||||
latents = self.scheduler.step(
|
||||
noise_pred,
|
||||
t,
|
||||
latents,
|
||||
masked_image_latents,
|
||||
mask,
|
||||
**extra_step_kwargs,
|
||||
).prev_sample
|
||||
|
||||
# call the callback, if provided
|
||||
progress_bar.update()
|
||||
if callback is not None and i % callback_steps == 0:
|
||||
callback(i, t, latents)
|
||||
|
||||
t_last = t
|
||||
|
||||
# 11. Post-processing
|
||||
image = self.decode_latents(latents)
|
||||
|
||||
# 12. Run safety checker
|
||||
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
||||
|
||||
# 13. Convert to PIL
|
||||
if output_type == "pil":
|
||||
image = self.numpy_to_pil(image)
|
||||
|
||||
# Offload last model to CPU
|
||||
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
||||
self.final_offload_hook.offload()
|
||||
|
||||
if not return_dict:
|
||||
return (image, has_nsfw_concept)
|
||||
|
||||
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
||||
examples/community/stable_diffusion_tensorrt_img2img.py (Executable file, 1055 lines): file diff suppressed because it is too large
examples/community/stable_diffusion_tensorrt_inpaint.py (Executable file, 1088 lines): file diff suppressed because it is too large
examples/community/stable_diffusion_tensorrt_txt2img.py (Normal file → Executable file, 10 lines changed)
@@ -54,8 +54,9 @@ from diffusers.utils import DIFFUSERS_CACHE, logging
|
||||
|
||||
"""
|
||||
Installation instructions
|
||||
python3 -m pip install --upgrade tensorrt
|
||||
python3 -m pip install --upgrade polygraphy onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
|
||||
python3 -m pip install --upgrade transformers diffusers>=0.16.0
|
||||
python3 -m pip install --upgrade tensorrt>=8.6.1
|
||||
python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
|
||||
python3 -m pip install onnxruntime
|
||||
"""
|
||||
|
||||
@@ -132,7 +133,7 @@ class Engine:
|
||||
config_kwargs["tactic_sources"] = []
|
||||
|
||||
engine = engine_from_network(
|
||||
network_from_onnx_path(onnx_path),
|
||||
network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
|
||||
config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **config_kwargs),
|
||||
save_timing_cache=timing_cache,
|
||||
)
|
||||
@@ -633,6 +634,7 @@ class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
|
||||
onnx_dir: str = "onnx",
|
||||
# TensorRT engine build parameters
|
||||
engine_dir: str = "engine",
|
||||
build_preview_features: bool = True,
|
||||
force_engine_rebuild: bool = False,
|
||||
timing_cache: str = "timing_cache",
|
||||
):
|
||||
@@ -652,7 +654,7 @@ class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
|
||||
self.timing_cache = timing_cache
|
||||
self.build_static_batch = False
|
||||
self.build_dynamic_shape = False
|
||||
self.build_preview_features = False
|
||||
self.build_preview_features = build_preview_features
|
||||
|
||||
self.max_batch_size = max_batch_size
|
||||
# TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation.
|
||||
|
||||
@@ -55,11 +55,22 @@ if is_wandb_available():
|
||||
import wandb
|
||||
|
||||
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
|
||||
check_min_version("0.16.0")
|
||||
check_min_version("0.17.0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
def image_grid(imgs, rows, cols):
|
||||
assert len(imgs) == rows * cols
|
||||
|
||||
w, h = imgs[0].size
|
||||
grid = Image.new("RGB", size=(cols * w, rows * h))
|
||||
|
||||
for i, img in enumerate(imgs):
|
||||
grid.paste(img, box=(i % cols * w, i // cols * h))
|
||||
return grid
|
||||
|
||||
|
||||
def log_validation(vae, text_encoder, tokenizer, unet, controlnet, args, accelerator, weight_dtype, step):
|
||||
logger.info("Running validation... ")
|
||||
|
||||
@@ -156,6 +167,8 @@ def log_validation(vae, text_encoder, tokenizer, unet, controlnet, args, acceler
|
||||
else:
|
||||
logger.warn(f"image logging not implemented for {tracker.name}")
|
||||
|
||||
return image_logs
|
||||
|
||||
|
||||
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
|
||||
text_encoder_config = PretrainedConfig.from_pretrained(
|
||||
@@ -177,6 +190,43 @@ def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: st
|
||||
raise ValueError(f"{model_class} is not supported.")
|
||||
|
||||
|
||||
def save_model_card(repo_id: str, image_logs=None, base_model=str, repo_folder=None):
|
||||
img_str = ""
|
||||
if image_logs is not None:
|
||||
img_str = "You can find some example images below.\n"
|
||||
for i, log in enumerate(image_logs):
|
||||
images = log["images"]
|
||||
validation_prompt = log["validation_prompt"]
|
||||
validation_image = log["validation_image"]
|
||||
validation_image.save(os.path.join(repo_folder, "image_control.png"))
|
||||
img_str += f"prompt: {validation_prompt}\n"
|
||||
images = [validation_image] + images
|
||||
image_grid(images, 1, len(images)).save(os.path.join(repo_folder, f"images_{i}.png"))
|
||||
img_str += f"\n"
|
||||
|
||||
yaml = f"""
|
||||
---
|
||||
license: creativeml-openrail-m
|
||||
base_model: {base_model}
|
||||
tags:
|
||||
- stable-diffusion
|
||||
- stable-diffusion-diffusers
|
||||
- text-to-image
|
||||
- diffusers
|
||||
- controlnet
|
||||
inference: true
|
||||
---
|
||||
"""
|
||||
model_card = f"""
|
||||
# controlnet-{repo_id}
|
||||
|
||||
These are controlnet weights trained on {base_model} with a new type of conditioning.
|
||||
{img_str}
|
||||
"""
|
||||
with open(os.path.join(repo_folder, "README.md"), "w") as f:
|
||||
f.write(yaml + model_card)
|
||||
|
||||
|
||||
def parse_args(input_args=None):
|
||||
parser = argparse.ArgumentParser(description="Simple example of a ControlNet training script.")
|
||||
parser.add_argument(
|
||||
@@ -486,7 +536,6 @@ def parse_args(input_args=None):
|
||||
"--tracker_project_name",
|
||||
type=str,
|
||||
default="train_controlnet",
|
||||
required=True,
|
||||
help=(
|
||||
"The `project_name` argument passed to Accelerator.init_trackers for"
|
||||
" more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
|
||||
@@ -930,7 +979,7 @@ def main(args):
|
||||
accelerator.load_state(os.path.join(args.output_dir, path))
|
||||
global_step = int(path.split("-")[1])
|
||||
|
||||
initial_global_step = global_step * args.gradient_accumulation_steps
|
||||
initial_global_step = global_step
|
||||
first_epoch = global_step // num_update_steps_per_epoch
|
||||
else:
|
||||
initial_global_step = 0
|
||||
@@ -943,6 +992,7 @@ def main(args):
|
||||
disable=not accelerator.is_local_main_process,
|
||||
)
|
||||
|
||||
image_logs = None
|
||||
for epoch in range(first_epoch, args.num_train_epochs):
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
with accelerator.accumulate(controlnet):
|
||||
@@ -1014,7 +1064,7 @@ def main(args):
|
||||
logger.info(f"Saved state to {save_path}")
|
||||
|
||||
if args.validation_prompt is not None and global_step % args.validation_steps == 0:
|
||||
log_validation(
|
||||
image_logs = log_validation(
|
||||
vae,
|
||||
text_encoder,
|
||||
tokenizer,
|
||||
@@ -1040,6 +1090,12 @@ def main(args):
|
||||
controlnet.save_pretrained(args.output_dir)
|
||||
|
||||
if args.push_to_hub:
|
||||
save_model_card(
|
||||
repo_id,
|
||||
image_logs=image_logs,
|
||||
base_model=args.pretrained_model_name_or_path,
|
||||
repo_folder=args.output_dir,
|
||||
)
|
||||
upload_folder(
|
||||
repo_id=repo_id,
|
||||
folder_path=args.output_dir,
|
||||
|
||||
@@ -59,7 +59,7 @@ if is_wandb_available():
|
||||
import wandb
|
||||
|
||||
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
|
||||
check_min_version("0.16.0")
|
||||
check_min_version("0.17.0")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -56,7 +56,7 @@ from diffusers.utils.import_utils import is_xformers_available
|
||||
|
||||
|
||||
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
|
||||
check_min_version("0.16.0")
|
||||
check_min_version("0.17.0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
@@ -43,6 +43,8 @@ from accelerate.utils import write_basic_config
|
||||
write_basic_config()
|
||||
```
|
||||
|
||||
When running `accelerate config`, if you set torch compile mode to True, there can be dramatic speedups.
|
||||
|
||||
### Dog toy example
|
||||
|
||||
Now let's get our dataset. For this example we will use some dog images: https://huggingface.co/datasets/diffusers/dog-example.
|
||||
@@ -80,7 +82,8 @@ accelerate launch train_dreambooth.py \
|
||||
--learning_rate=5e-6 \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--max_train_steps=400
|
||||
--max_train_steps=400 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
### Training with prior-preservation loss
|
||||
@@ -109,7 +112,8 @@ accelerate launch train_dreambooth.py \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
--max_train_steps=800 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
|
||||
@@ -141,7 +145,8 @@ accelerate launch train_dreambooth.py \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
--max_train_steps=800 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
|
||||
@@ -176,7 +181,8 @@ accelerate launch train_dreambooth.py \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
--max_train_steps=800 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
|
||||
@@ -218,7 +224,8 @@ accelerate launch --mixed_precision="fp16" train_dreambooth.py \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
--max_train_steps=800 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
### Fine-tune text encoder with the UNet.
|
||||
@@ -251,7 +258,8 @@ accelerate launch train_dreambooth.py \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--num_class_images=200 \
|
||||
--max_train_steps=800
|
||||
--max_train_steps=800 \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
### Using DreamBooth for pipelines other than Stable Diffusion
|
||||
@@ -355,7 +363,7 @@ The final LoRA embedding weights have been uploaded to [patrickvonplaten/lora_dr
|
||||
The training results are summarized [here](https://api.wandb.ai/report/patrickvonplaten/xm6cd5q5).
|
||||
You can use the `Step` slider to see how the model learned the features of our subject while the model trained.
|
||||
|
||||
Optionally, we can also train additional LoRA layers for the text encoder. Specify the `train_text_encoder` argument above for that. If you're interested to know more about how we
|
||||
Optionally, we can also train additional LoRA layers for the text encoder. Specify the `--train_text_encoder` argument above for that. If you're interested to know more about how we
|
||||
enable this support, check out this [PR](https://github.com/huggingface/diffusers/pull/2918).
|
||||
|
||||
With the default hyperparameters from the above, the training seems to go in a positive direction. Check out [this panel](https://wandb.ai/sayakpaul/dreambooth-lora/reports/test-23-04-17-17-00-13---Vmlldzo0MDkwNjMy). The trained LoRA layers are available [here](https://huggingface.co/sayakpaul/dreambooth).
|
||||
@@ -387,6 +395,50 @@ Finally, we can run the model in inference.
|
||||
image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
|
||||
```
|
||||
|
||||
If you are loading the LoRA parameters from the Hub and if the Hub repository has
|
||||
a `base_model` tag (such as [this](https://huggingface.co/patrickvonplaten/lora_dreambooth_dog_example/blob/main/README.md?code=true#L4)), then
|
||||
you can do:
|
||||
|
||||
```py
|
||||
from huggingface_hub.repocard import RepoCard
|
||||
|
||||
lora_model_id = "patrickvonplaten/lora_dreambooth_dog_example"
|
||||
card = RepoCard.load(lora_model_id)
|
||||
base_model_id = card.data.to_dict()["base_model"]
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
|
||||
...
|
||||
```
|
||||
|
||||
If you used `--train_text_encoder` during training, then use `pipe.load_lora_weights()` to load the LoRA
|
||||
weights. For example:
|
||||
|
||||
```python
|
||||
from huggingface_hub.repocard import RepoCard
|
||||
from diffusers import StableDiffusionPipeline
|
||||
import torch
|
||||
|
||||
lora_model_id = "sayakpaul/dreambooth-text-encoder-test"
|
||||
card = RepoCard.load(lora_model_id)
|
||||
base_model_id = card.data.to_dict()["base_model"]
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
|
||||
pipe = pipe.to("cuda")
|
||||
pipe.load_lora_weights(lora_model_id)
|
||||
image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
|
||||
```
|
||||
|
||||
Note that the use of [`LoraLoaderMixin.load_lora_weights`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.load_lora_weights) is preferred to [`UNet2DConditionLoadersMixin.load_attn_procs`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs) for loading LoRA parameters. This is because
|
||||
`LoraLoaderMixin.load_lora_weights` can handle the following situations:
|
||||
|
||||
* LoRA parameters that don't have separate identifiers for the UNet and the text encoder (such as [`"patrickvonplaten/lora_dreambooth_dog_example"`](https://huggingface.co/patrickvonplaten/lora_dreambooth_dog_example)). So, you can just do:
|
||||
|
||||
```py
|
||||
pipe.load_lora_weights(lora_model_path)
|
||||
```
|
||||
|
||||
* LoRA parameters that have separate identifiers for the UNet and the text encoder, such as [`"sayakpaul/dreambooth"`](https://huggingface.co/sayakpaul/dreambooth). The same call works here as well; see the sketch below.
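
A minimal sketch of this case follows; it assumes `runwayml/stable-diffusion-v1-5` is a compatible base checkpoint for these LoRA weights (in practice, read the `base_model` tag from the repo card as shown above):

```py
import torch
from diffusers import StableDiffusionPipeline

# Assumed base checkpoint; read the `base_model` tag of the LoRA repo card to be sure.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# `load_lora_weights` detects the separate UNet and text encoder entries in the
# checkpoint and loads each set of parameters into the corresponding module.
pipe.load_lora_weights("sayakpaul/dreambooth")

image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
```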
|
||||
|
||||
## Training with Flax/JAX
|
||||
|
||||
For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script.
|
||||
@@ -481,3 +533,207 @@ More info: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_
|
||||
|
||||
### Experimental results
|
||||
You can refer to [this blog post](https://huggingface.co/blog/dreambooth) that discusses some of our DreamBooth experiments in detail. Specifically, it recommends a set of DreamBooth-specific tips and tricks that we have found to work well for a variety of subjects.
|
||||
|
||||
## IF
|
||||
|
||||
You can use the LoRA and full DreamBooth scripts to train the text-to-image [IF model](https://huggingface.co/DeepFloyd/IF-I-XL-v1.0) and the stage II upscaler
[IF model](https://huggingface.co/DeepFloyd/IF-II-L-v1.0).

Note that IF predicts a variance, while our finetuning scripts only train the model's predicted error, so for finetuned IF models we switch to a fixed
variance schedule. The full finetuning scripts will update the scheduler config for the full saved model. However, when loading saved LoRA weights, you
must also update the pipeline's scheduler config.
|
||||
|
||||
```py
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
|
||||
|
||||
pipe.load_lora_weights("<lora weights path>")
|
||||
|
||||
# Update scheduler config to fixed variance schedule
|
||||
pipe.scheduler = pipe.scheduler.__class__.from_config(pipe.scheduler.config, variance_type="fixed_small")
|
||||
```
|
||||
|
||||
Additionally, a few alternative CLI flags are needed for IF.
|
||||
|
||||
`--resolution=64`: IF is a pixel-space diffusion model. In order to operate on uncompressed pixels, the input images are of a much smaller resolution.
|
||||
|
||||
`--pre_compute_text_embeddings`: IF uses [T5](https://huggingface.co/docs/transformers/model_doc/t5) for its text encoder. In order to save GPU memory, we pre-compute all text embeddings and then deallocate
T5.
|
||||
|
||||
`--tokenizer_max_length=77`: T5 has a longer default text length, but the default IF encoding procedure uses a smaller number.
|
||||
|
||||
`--text_encoder_use_attention_mask`: T5 requires the attention mask to be passed to the text encoder.
|
||||
|
||||
### Tips and Tricks
|
||||
We find LoRA to be sufficient for finetuning the stage I model as the low resolution of the model makes representing finegrained detail hard regardless.
|
||||
|
||||
For common and/or visually simple object concepts, you can get away with not finetuning the upscaler. Just be sure to adjust the prompt passed to the
upscaler to remove the new token from the instance prompt, i.e. if your stage I prompt is "a sks dog", use "a dog" for your stage II prompt.

For finegrained detail like faces that aren't present in the original training set, we find that full finetuning of the stage II upscaler is better than
LoRA finetuning of stage II.
|
||||
|
||||
For finegrained detail like faces, we find that lower learning rates along with larger batch sizes work best.
|
||||
|
||||
For stage II, we find that lower learning rates are also needed.
|
||||
|
||||
We found experimentally that the DDPM scheduler with its default, larger number of denoising steps sometimes works better than the DPM Solver scheduler
used in the training scripts.
|
||||
|
||||
### Stage II additional validation images
|
||||
|
||||
The stage II validation requires images to upscale, so we can download a downsized version of the training set:
|
||||
|
||||
```py
|
||||
from huggingface_hub import snapshot_download
|
||||
|
||||
local_dir = "./dog_downsized"
|
||||
snapshot_download(
|
||||
"diffusers/dog-example-downsized",
|
||||
local_dir=local_dir,
|
||||
repo_type="dataset",
|
||||
ignore_patterns=".gitattributes",
|
||||
)
|
||||
```
|
||||
|
||||
### IF stage I LoRA Dreambooth
|
||||
This training configuration requires ~28 GB VRAM.
|
||||
|
||||
```sh
|
||||
export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0"
|
||||
export INSTANCE_DIR="dog"
|
||||
export OUTPUT_DIR="dreambooth_dog_lora"
|
||||
|
||||
accelerate launch train_dreambooth_lora.py \
|
||||
--report_to wandb \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--instance_prompt="a sks dog" \
|
||||
--resolution=64 \
|
||||
--train_batch_size=4 \
|
||||
--gradient_accumulation_steps=1 \
|
||||
--learning_rate=5e-6 \
|
||||
--scale_lr \
|
||||
--max_train_steps=1200 \
|
||||
--validation_prompt="a sks dog" \
|
||||
--validation_epochs=25 \
|
||||
--checkpointing_steps=100 \
|
||||
--pre_compute_text_embeddings \
|
||||
--tokenizer_max_length=77 \
|
||||
--text_encoder_use_attention_mask
|
||||
```
|
||||
|
||||
### IF stage II LoRA Dreambooth
|
||||
|
||||
`--validation_images`: These images are upscaled during validation steps.
|
||||
|
||||
`--class_labels_conditioning=timesteps`: Pass additional conditioning to the UNet needed for stage II.
|
||||
|
||||
`--learning_rate=1e-6`: Lower learning rate than stage I.
|
||||
|
||||
`--resolution=256`: The upscaler expects higher resolution inputs.
|
||||
|
||||
```sh
|
||||
export MODEL_NAME="DeepFloyd/IF-II-L-v1.0"
|
||||
export INSTANCE_DIR="dog"
|
||||
export OUTPUT_DIR="dreambooth_dog_upscale"
|
||||
export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png"
|
||||
|
||||
python train_dreambooth_lora.py \
|
||||
--report_to wandb \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--instance_prompt="a sks dog" \
|
||||
--resolution=256 \
|
||||
--train_batch_size=4 \
|
||||
--gradient_accumulation_steps=1 \
|
||||
--learning_rate=1e-6 \
|
||||
--max_train_steps=2000 \
|
||||
--validation_prompt="a sks dog" \
|
||||
--validation_epochs=100 \
|
||||
--checkpointing_steps=500 \
|
||||
--pre_compute_text_embeddings \
|
||||
--tokenizer_max_length=77 \
|
||||
--text_encoder_use_attention_mask \
|
||||
--validation_images $VALIDATION_IMAGES \
|
||||
--class_labels_conditioning=timesteps
|
||||
```
|
||||
|
||||
### IF Stage I Full Dreambooth
|
||||
`--skip_save_text_encoder`: When training the full model, this will skip saving the entire T5 with the finetuned model. You can still load the pipeline
|
||||
with a T5 loaded from the original model.
|
||||
|
||||
`--use_8bit_adam`: Due to the size of the optimizer states, we recommend training the full XL IF model with 8bit Adam.
|
||||
|
||||
`--learning_rate=1e-7`: For full DreamBooth, IF requires very low learning rates; with higher learning rates, model quality will degrade. Note that it is
likely the learning rate can be increased with larger batch sizes.
|
||||
|
||||
Using 8bit adam and a batch size of 4, the model can be trained in ~48 GB VRAM.
|
||||
|
||||
```sh
|
||||
export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0"
|
||||
|
||||
export INSTANCE_DIR="dog"
|
||||
export OUTPUT_DIR="dreambooth_if"
|
||||
|
||||
accelerate launch train_dreambooth.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--instance_prompt="a photo of sks dog" \
|
||||
--resolution=64 \
|
||||
--train_batch_size=4 \
|
||||
--gradient_accumulation_steps=1 \
|
||||
--learning_rate=1e-7 \
|
||||
--max_train_steps=150 \
|
||||
--validation_prompt "a photo of sks dog" \
|
||||
--validation_steps 25 \
|
||||
--text_encoder_use_attention_mask \
|
||||
--tokenizer_max_length 77 \
|
||||
--pre_compute_text_embeddings \
|
||||
--use_8bit_adam \
|
||||
--set_grads_to_none \
|
||||
--skip_save_text_encoder \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
### IF Stage II Full Dreambooth
|
||||
|
||||
`--learning_rate=5e-6`: With a smaller effective batch size of 4, we found that we required learning rates as low as
|
||||
1e-8.
|
||||
|
||||
`--resolution=256`: The upscaler expects higher resolution inputs.
|
||||
|
||||
`--train_batch_size=2` and `--gradient_accumulation_steps=6`: We found that full training of stage II particularly with
|
||||
faces required large effective batch sizes.
|
||||
|
||||
```sh
|
||||
export MODEL_NAME="DeepFloyd/IF-II-L-v1.0"
|
||||
export INSTANCE_DIR="dog"
|
||||
export OUTPUT_DIR="dreambooth_dog_upscale"
|
||||
export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png"
|
||||
|
||||
accelerate launch train_dreambooth.py \
|
||||
--report_to wandb \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--instance_prompt="a sks dog" \
|
||||
--resolution=256 \
|
||||
--train_batch_size=2 \
|
||||
--gradient_accumulation_steps=6 \
|
||||
--learning_rate=5e-6 \
|
||||
--max_train_steps=2000 \
|
||||
--validation_prompt="a sks dog" \
|
||||
--validation_steps=150 \
|
||||
--checkpointing_steps=500 \
|
||||
--pre_compute_text_embeddings \
|
||||
--tokenizer_max_length=77 \
|
||||
--text_encoder_use_attention_mask \
|
||||
--validation_images $VALIDATION_IMAGES \
|
||||
--class_labels_conditioning timesteps \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
|
||||
import argparse
|
||||
import gc
|
||||
import hashlib
|
||||
import itertools
|
||||
import logging
|
||||
@@ -22,7 +23,6 @@ import os
|
||||
import warnings
|
||||
from pathlib import Path
|
||||
|
||||
import accelerate
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
@@ -31,9 +31,10 @@ import transformers
|
||||
from accelerate import Accelerator
|
||||
from accelerate.logging import get_logger
|
||||
from accelerate.utils import ProjectConfiguration, set_seed
|
||||
from huggingface_hub import create_repo, upload_folder
|
||||
from huggingface_hub import create_repo, model_info, upload_folder
|
||||
from packaging import version
|
||||
from PIL import Image
|
||||
from PIL.ImageOps import exif_transpose
|
||||
from torch.utils.data import Dataset
|
||||
from torchvision import transforms
|
||||
from tqdm.auto import tqdm
|
||||
@@ -45,6 +46,7 @@ from diffusers import (
|
||||
DDPMScheduler,
|
||||
DiffusionPipeline,
|
||||
DPMSolverMultistepScheduler,
|
||||
StableDiffusionPipeline,
|
||||
UNet2DConditionModel,
|
||||
)
|
||||
from diffusers.optimization import get_scheduler
|
||||
@@ -56,37 +58,115 @@ if is_wandb_available():
|
||||
import wandb
|
||||
|
||||
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
|
||||
check_min_version("0.16.0")
|
||||
check_min_version("0.17.0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
def log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch):
|
||||
def save_model_card(
|
||||
repo_id: str,
|
||||
images=None,
|
||||
base_model=str,
|
||||
train_text_encoder=False,
|
||||
prompt=str,
|
||||
repo_folder=None,
|
||||
pipeline: DiffusionPipeline = None,
|
||||
):
|
||||
img_str = ""
|
||||
for i, image in enumerate(images):
|
||||
image.save(os.path.join(repo_folder, f"image_{i}.png"))
|
||||
img_str += f"\n"
|
||||
|
||||
yaml = f"""
|
||||
---
|
||||
license: creativeml-openrail-m
|
||||
base_model: {base_model}
|
||||
instance_prompt: {prompt}
|
||||
tags:
|
||||
- {'stable-diffusion' if isinstance(pipeline, StableDiffusionPipeline) else 'if'}
|
||||
- {'stable-diffusion-diffusers' if isinstance(pipeline, StableDiffusionPipeline) else 'if-diffusers'}
|
||||
- text-to-image
|
||||
- diffusers
|
||||
- dreambooth
|
||||
inference: true
|
||||
---
|
||||
"""
|
||||
model_card = f"""
|
||||
# DreamBooth - {repo_id}
|
||||
|
||||
This is a dreambooth model derived from {base_model}. The weights were trained on {prompt} using [DreamBooth](https://dreambooth.github.io/).
|
||||
You can find some example images in the following. \n
|
||||
{img_str}
|
||||
|
||||
DreamBooth for the text encoder was enabled: {train_text_encoder}.
|
||||
"""
|
||||
with open(os.path.join(repo_folder, "README.md"), "w") as f:
|
||||
f.write(yaml + model_card)
|
||||
|
||||
|
||||
def log_validation(
|
||||
text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch, prompt_embeds, negative_prompt_embeds
|
||||
):
|
||||
logger.info(
|
||||
f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
|
||||
f" {args.validation_prompt}."
|
||||
)
|
||||
|
||||
pipeline_args = {}
|
||||
|
||||
if vae is not None:
|
||||
pipeline_args["vae"] = vae
|
||||
|
||||
if text_encoder is not None:
|
||||
text_encoder = accelerator.unwrap_model(text_encoder)
|
||||
|
||||
# create pipeline (note: unet and vae are loaded again in float32)
|
||||
pipeline = DiffusionPipeline.from_pretrained(
|
||||
args.pretrained_model_name_or_path,
|
||||
text_encoder=accelerator.unwrap_model(text_encoder),
|
||||
tokenizer=tokenizer,
|
||||
text_encoder=text_encoder,
|
||||
unet=accelerator.unwrap_model(unet),
|
||||
vae=vae,
|
||||
revision=args.revision,
|
||||
torch_dtype=weight_dtype,
|
||||
**pipeline_args,
|
||||
)
|
||||
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
|
||||
|
||||
# We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
|
||||
scheduler_args = {}
|
||||
|
||||
if "variance_type" in pipeline.scheduler.config:
|
||||
variance_type = pipeline.scheduler.config.variance_type
|
||||
|
||||
if variance_type in ["learned", "learned_range"]:
|
||||
variance_type = "fixed_small"
|
||||
|
||||
scheduler_args["variance_type"] = variance_type
|
||||
|
||||
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
|
||||
pipeline = pipeline.to(accelerator.device)
|
||||
pipeline.set_progress_bar_config(disable=True)
|
||||
|
||||
if args.pre_compute_text_embeddings:
|
||||
pipeline_args = {
|
||||
"prompt_embeds": prompt_embeds,
|
||||
"negative_prompt_embeds": negative_prompt_embeds,
|
||||
}
|
||||
else:
|
||||
pipeline_args = {"prompt": args.validation_prompt}
|
||||
|
||||
# run inference
|
||||
generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed)
|
||||
images = []
|
||||
for _ in range(args.num_validation_images):
|
||||
with torch.autocast("cuda"):
|
||||
image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
|
||||
images.append(image)
|
||||
if args.validation_images is None:
|
||||
for _ in range(args.num_validation_images):
|
||||
with torch.autocast("cuda"):
|
||||
image = pipeline(**pipeline_args, num_inference_steps=25, generator=generator).images[0]
|
||||
images.append(image)
|
||||
else:
|
||||
for image in args.validation_images:
|
||||
image = Image.open(image)
|
||||
image = pipeline(**pipeline_args, image=image, generator=generator).images[0]
|
||||
images.append(image)
|
||||
|
||||
for tracker in accelerator.trackers:
|
||||
if tracker.name == "tensorboard":
|
||||
@@ -104,6 +184,8 @@ def log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight
|
||||
del pipeline
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
return images
|
||||
|
||||
|
||||
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
|
||||
text_encoder_config = PretrainedConfig.from_pretrained(
|
||||
@@ -121,6 +203,10 @@ def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: st
|
||||
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
|
||||
|
||||
return RobertaSeriesModelWithTransformation
|
||||
elif model_class == "T5EncoderModel":
|
||||
from transformers import T5EncoderModel
|
||||
|
||||
return T5EncoderModel
|
||||
else:
|
||||
raise ValueError(f"{model_class} is not supported.")
|
||||
|
||||
@@ -425,6 +511,40 @@ def parse_args(input_args=None):
|
||||
" See: https://www.crosslabs.org//blog/diffusion-with-offset-noise for more information."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--pre_compute_text_embeddings",
|
||||
action="store_true",
|
||||
help="Whether or not to pre-compute text embeddings. If text embeddings are pre-computed, the text encoder will not be kept in memory during training and will leave more GPU memory available for training the rest of the model. This is not compatible with `--train_text_encoder`.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--tokenizer_max_length",
|
||||
type=int,
|
||||
default=None,
|
||||
required=False,
|
||||
help="The maximum length of the tokenizer. If not set, will default to the tokenizer's max length.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--text_encoder_use_attention_mask",
|
||||
action="store_true",
|
||||
required=False,
|
||||
help="Whether to use attention mask for the text encoder",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--skip_save_text_encoder", action="store_true", required=False, help="Set to not save text encoder"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--validation_images",
|
||||
required=False,
|
||||
default=None,
|
||||
nargs="+",
|
||||
help="Optional set of images to use for validation. Used when the target pipeline takes an initial image as input such as when training image variation or superresolution.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--class_labels_conditioning",
|
||||
required=False,
|
||||
default=None,
|
||||
help="The optional `class_label` conditioning to pass to the unet, available values are `timesteps`.",
|
||||
)
|
||||
|
||||
if input_args is not None:
|
||||
args = parser.parse_args(input_args)
|
||||
@@ -447,6 +567,9 @@ def parse_args(input_args=None):
|
||||
if args.class_prompt is not None:
|
||||
warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
|
||||
|
||||
if args.train_text_encoder and args.pre_compute_text_embeddings:
|
||||
raise ValueError("`--train_text_encoder` cannot be used with `--pre_compute_text_embeddings`")
|
||||
|
||||
return args
|
||||
|
||||
|
||||
@@ -466,10 +589,16 @@ class DreamBoothDataset(Dataset):
|
||||
class_num=None,
|
||||
size=512,
|
||||
center_crop=False,
|
||||
encoder_hidden_states=None,
|
||||
instance_prompt_encoder_hidden_states=None,
|
||||
tokenizer_max_length=None,
|
||||
):
|
||||
self.size = size
|
||||
self.center_crop = center_crop
|
||||
self.tokenizer = tokenizer
|
||||
self.encoder_hidden_states = encoder_hidden_states
|
||||
self.instance_prompt_encoder_hidden_states = instance_prompt_encoder_hidden_states
|
||||
self.tokenizer_max_length = tokenizer_max_length
|
||||
|
||||
self.instance_data_root = Path(instance_data_root)
|
||||
if not self.instance_data_root.exists():
|
||||
@@ -508,43 +637,59 @@ class DreamBoothDataset(Dataset):
|
||||
def __getitem__(self, index):
|
||||
example = {}
|
||||
instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
|
||||
instance_image = exif_transpose(instance_image)
|
||||
|
||||
if not instance_image.mode == "RGB":
|
||||
instance_image = instance_image.convert("RGB")
|
||||
example["instance_images"] = self.image_transforms(instance_image)
|
||||
example["instance_prompt_ids"] = self.tokenizer(
|
||||
self.instance_prompt,
|
||||
truncation=True,
|
||||
padding="max_length",
|
||||
max_length=self.tokenizer.model_max_length,
|
||||
return_tensors="pt",
|
||||
).input_ids
|
||||
|
||||
if self.encoder_hidden_states is not None:
|
||||
example["instance_prompt_ids"] = self.encoder_hidden_states
|
||||
else:
|
||||
text_inputs = tokenize_prompt(
|
||||
self.tokenizer, self.instance_prompt, tokenizer_max_length=self.tokenizer_max_length
|
||||
)
|
||||
example["instance_prompt_ids"] = text_inputs.input_ids
|
||||
example["instance_attention_mask"] = text_inputs.attention_mask
|
||||
|
||||
if self.class_data_root:
|
||||
class_image = Image.open(self.class_images_path[index % self.num_class_images])
|
||||
class_image = exif_transpose(class_image)
|
||||
|
||||
if not class_image.mode == "RGB":
|
||||
class_image = class_image.convert("RGB")
|
||||
example["class_images"] = self.image_transforms(class_image)
|
||||
example["class_prompt_ids"] = self.tokenizer(
|
||||
self.class_prompt,
|
||||
truncation=True,
|
||||
padding="max_length",
|
||||
max_length=self.tokenizer.model_max_length,
|
||||
return_tensors="pt",
|
||||
).input_ids
|
||||
|
||||
if self.instance_prompt_encoder_hidden_states is not None:
|
||||
example["class_prompt_ids"] = self.instance_prompt_encoder_hidden_states
|
||||
else:
|
||||
class_text_inputs = tokenize_prompt(
|
||||
self.tokenizer, self.class_prompt, tokenizer_max_length=self.tokenizer_max_length
|
||||
)
|
||||
example["class_prompt_ids"] = class_text_inputs.input_ids
|
||||
example["class_attention_mask"] = class_text_inputs.attention_mask
|
||||
|
||||
return example
|
||||
|
||||
|
||||
def collate_fn(examples, with_prior_preservation=False):
|
||||
has_attention_mask = "instance_attention_mask" in examples[0]
|
||||
|
||||
input_ids = [example["instance_prompt_ids"] for example in examples]
|
||||
pixel_values = [example["instance_images"] for example in examples]
|
||||
|
||||
if has_attention_mask:
|
||||
attention_mask = [example["instance_attention_mask"] for example in examples]
|
||||
|
||||
# Concat class and instance examples for prior preservation.
|
||||
# We do this to avoid doing two forward passes.
|
||||
if with_prior_preservation:
|
||||
input_ids += [example["class_prompt_ids"] for example in examples]
|
||||
pixel_values += [example["class_images"] for example in examples]
|
||||
|
||||
if has_attention_mask:
|
||||
attention_mask += [example["class_attention_mask"] for example in examples]
|
||||
|
||||
pixel_values = torch.stack(pixel_values)
|
||||
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
|
||||
|
||||
@@ -554,6 +699,11 @@ def collate_fn(examples, with_prior_preservation=False):
|
||||
"input_ids": input_ids,
|
||||
"pixel_values": pixel_values,
|
||||
}
|
||||
|
||||
if has_attention_mask:
|
||||
attention_mask = torch.cat(attention_mask, dim=0)
|
||||
batch["attention_mask"] = attention_mask
|
||||
|
||||
return batch
|
||||
|
||||
|
||||
@@ -574,6 +724,50 @@ class PromptDataset(Dataset):
|
||||
return example
|
||||
|
||||
|
||||
def model_has_vae(args):
|
||||
config_file_name = os.path.join("vae", AutoencoderKL.config_name)
|
||||
if os.path.isdir(args.pretrained_model_name_or_path):
|
||||
config_file_name = os.path.join(args.pretrained_model_name_or_path, config_file_name)
|
||||
return os.path.isfile(config_file_name)
|
||||
else:
|
||||
files_in_repo = model_info(args.pretrained_model_name_or_path, revision=args.revision).siblings
|
||||
return any(file.rfilename == config_file_name for file in files_in_repo)
|
||||
|
||||
|
||||
def tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None):
|
||||
if tokenizer_max_length is not None:
|
||||
max_length = tokenizer_max_length
|
||||
else:
|
||||
max_length = tokenizer.model_max_length
|
||||
|
||||
text_inputs = tokenizer(
|
||||
prompt,
|
||||
truncation=True,
|
||||
padding="max_length",
|
||||
max_length=max_length,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
return text_inputs
|
||||
|
||||
|
||||
def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_attention_mask=None):
|
||||
text_input_ids = input_ids.to(text_encoder.device)
|
||||
|
||||
if text_encoder_use_attention_mask:
|
||||
attention_mask = attention_mask.to(text_encoder.device)
|
||||
else:
|
||||
attention_mask = None
|
||||
|
||||
prompt_embeds = text_encoder(
|
||||
text_input_ids,
|
||||
attention_mask=attention_mask,
|
||||
)
|
||||
prompt_embeds = prompt_embeds[0]
|
||||
|
||||
return prompt_embeds
|
||||
|
||||
|
||||
def main(args):
|
||||
logging_dir = Path(args.output_dir, args.logging_dir)
|
||||
|
||||
@@ -693,43 +887,50 @@ def main(args):
|
||||
text_encoder = text_encoder_cls.from_pretrained(
|
||||
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
|
||||
)
|
||||
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
|
||||
|
||||
if model_has_vae(args):
|
||||
vae = AutoencoderKL.from_pretrained(
|
||||
args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision
|
||||
)
|
||||
else:
|
||||
vae = None
|
||||
|
||||
unet = UNet2DConditionModel.from_pretrained(
|
||||
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
|
||||
)
|
||||
|
||||
# `accelerate` 0.16.0 will have better support for customized saving
|
||||
if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
|
||||
# create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
|
||||
def save_model_hook(models, weights, output_dir):
|
||||
for model in models:
|
||||
sub_dir = "unet" if type(model) == type(unet) else "text_encoder"
|
||||
model.save_pretrained(os.path.join(output_dir, sub_dir))
|
||||
# create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
|
||||
def save_model_hook(models, weights, output_dir):
|
||||
for model in models:
|
||||
sub_dir = "unet" if isinstance(model, type(accelerator.unwrap_model(unet))) else "text_encoder"
|
||||
model.save_pretrained(os.path.join(output_dir, sub_dir))
|
||||
|
||||
# make sure to pop weight so that corresponding model is not saved again
|
||||
weights.pop()
|
||||
# make sure to pop weight so that corresponding model is not saved again
|
||||
weights.pop()
|
||||
|
||||
def load_model_hook(models, input_dir):
|
||||
while len(models) > 0:
|
||||
# pop models so that they are not loaded again
|
||||
model = models.pop()
|
||||
def load_model_hook(models, input_dir):
|
||||
while len(models) > 0:
|
||||
# pop models so that they are not loaded again
|
||||
model = models.pop()
|
||||
|
||||
if type(model) == type(text_encoder):
|
||||
# load transformers style into model
|
||||
load_model = text_encoder_cls.from_pretrained(input_dir, subfolder="text_encoder")
|
||||
model.config = load_model.config
|
||||
else:
|
||||
# load diffusers style into model
|
||||
load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
|
||||
model.register_to_config(**load_model.config)
|
||||
if isinstance(model, type(accelerator.unwrap_model(text_encoder))):
|
||||
# load transformers style into model
|
||||
load_model = text_encoder_cls.from_pretrained(input_dir, subfolder="text_encoder")
|
||||
model.config = load_model.config
|
||||
else:
|
||||
# load diffusers style into model
|
||||
load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
|
||||
model.register_to_config(**load_model.config)
|
||||
|
||||
model.load_state_dict(load_model.state_dict())
|
||||
del load_model
|
||||
model.load_state_dict(load_model.state_dict())
|
||||
del load_model
|
||||
|
||||
accelerator.register_save_state_pre_hook(save_model_hook)
|
||||
accelerator.register_load_state_pre_hook(load_model_hook)
|
||||
accelerator.register_save_state_pre_hook(save_model_hook)
|
||||
accelerator.register_load_state_pre_hook(load_model_hook)
|
||||
|
||||
if vae is not None:
|
||||
vae.requires_grad_(False)
|
||||
|
||||
vae.requires_grad_(False)
|
||||
if not args.train_text_encoder:
|
||||
text_encoder.requires_grad_(False)
|
||||
|
||||
@@ -803,6 +1004,44 @@ def main(args):
|
||||
eps=args.adam_epsilon,
|
||||
)
|
||||
|
||||
if args.pre_compute_text_embeddings:
|
||||
|
||||
def compute_text_embeddings(prompt):
|
||||
with torch.no_grad():
|
||||
text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=args.tokenizer_max_length)
|
||||
prompt_embeds = encode_prompt(
|
||||
text_encoder,
|
||||
text_inputs.input_ids,
|
||||
text_inputs.attention_mask,
|
||||
text_encoder_use_attention_mask=args.text_encoder_use_attention_mask,
|
||||
)
|
||||
|
||||
return prompt_embeds
|
||||
|
||||
pre_computed_encoder_hidden_states = compute_text_embeddings(args.instance_prompt)
|
||||
validation_prompt_negative_prompt_embeds = compute_text_embeddings("")
|
||||
|
||||
if args.validation_prompt is not None:
|
||||
validation_prompt_encoder_hidden_states = compute_text_embeddings(args.validation_prompt)
|
||||
else:
|
||||
validation_prompt_encoder_hidden_states = None
|
||||
|
||||
if args.instance_prompt is not None:
|
||||
pre_computed_instance_prompt_encoder_hidden_states = compute_text_embeddings(args.instance_prompt)
|
||||
else:
|
||||
pre_computed_instance_prompt_encoder_hidden_states = None
|
||||
|
||||
text_encoder = None
|
||||
tokenizer = None
|
||||
|
||||
gc.collect()
|
||||
torch.cuda.empty_cache()
|
||||
else:
|
||||
pre_computed_encoder_hidden_states = None
|
||||
validation_prompt_encoder_hidden_states = None
|
||||
validation_prompt_negative_prompt_embeds = None
|
||||
pre_computed_instance_prompt_encoder_hidden_states = None
|
||||
|
||||
# Dataset and DataLoaders creation:
|
||||
train_dataset = DreamBoothDataset(
|
||||
instance_data_root=args.instance_data_dir,
|
||||
@@ -813,6 +1052,9 @@ def main(args):
|
||||
tokenizer=tokenizer,
|
||||
size=args.resolution,
|
||||
center_crop=args.center_crop,
|
||||
encoder_hidden_states=pre_computed_encoder_hidden_states,
|
||||
instance_prompt_encoder_hidden_states=pre_computed_instance_prompt_encoder_hidden_states,
|
||||
tokenizer_max_length=args.tokenizer_max_length,
|
||||
)
|
||||
|
||||
train_dataloader = torch.utils.data.DataLoader(
|
||||
@@ -858,8 +1100,10 @@ def main(args):
|
||||
weight_dtype = torch.bfloat16
|
||||
|
||||
# Move vae and text_encoder to device and cast to weight_dtype
|
||||
vae.to(accelerator.device, dtype=weight_dtype)
|
||||
if not args.train_text_encoder:
|
||||
if vae is not None:
|
||||
vae.to(accelerator.device, dtype=weight_dtype)
|
||||
|
||||
if not args.train_text_encoder and text_encoder is not None:
|
||||
text_encoder.to(accelerator.device, dtype=weight_dtype)
|
||||
|
||||
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
|
||||
@@ -929,37 +1173,65 @@ def main(args):
                 continue
 
             with accelerator.accumulate(unet):
-                # Convert images to latent space
-                latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
-                latents = latents * vae.config.scaling_factor
+                pixel_values = batch["pixel_values"].to(dtype=weight_dtype)
 
-                # Sample noise that we'll add to the latents
+                if vae is not None:
+                    # Convert images to latent space
+                    model_input = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
+                    model_input = model_input * vae.config.scaling_factor
+                else:
+                    model_input = pixel_values
+
+                # Sample noise that we'll add to the model input
                 if args.offset_noise:
-                    noise = torch.randn_like(latents) + 0.1 * torch.randn(
-                        latents.shape[0], latents.shape[1], 1, 1, device=latents.device
+                    noise = torch.randn_like(model_input) + 0.1 * torch.randn(
+                        model_input.shape[0], model_input.shape[1], 1, 1, device=model_input.device
                     )
                 else:
-                    noise = torch.randn_like(latents)
-                bsz = latents.shape[0]
+                    noise = torch.randn_like(model_input)
+                bsz, channels, height, width = model_input.shape
                 # Sample a random timestep for each image
-                timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
+                timesteps = torch.randint(
+                    0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device
+                )
                 timesteps = timesteps.long()
 
-                # Add noise to the latents according to the noise magnitude at each timestep
+                # Add noise to the model input according to the noise magnitude at each timestep
                 # (this is the forward diffusion process)
-                noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
+                noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
 
                 # Get the text embedding for conditioning
-                encoder_hidden_states = text_encoder(batch["input_ids"])[0]
+                if args.pre_compute_text_embeddings:
+                    encoder_hidden_states = batch["input_ids"]
+                else:
+                    encoder_hidden_states = encode_prompt(
+                        text_encoder,
+                        batch["input_ids"],
+                        batch["attention_mask"],
+                        text_encoder_use_attention_mask=args.text_encoder_use_attention_mask,
+                    )
+
+                if accelerator.unwrap_model(unet).config.in_channels == channels * 2:
+                    noisy_model_input = torch.cat([noisy_model_input, noisy_model_input], dim=1)
+
+                if args.class_labels_conditioning == "timesteps":
+                    class_labels = timesteps
+                else:
+                    class_labels = None
 
                 # Predict the noise residual
-                model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
+                model_pred = unet(
+                    noisy_model_input, timesteps, encoder_hidden_states, class_labels=class_labels
+                ).sample
+
+                if model_pred.shape[1] == 6:
+                    model_pred, _ = torch.chunk(model_pred, 2, dim=1)
 
                 # Get the target for loss depending on the prediction type
                 if noise_scheduler.config.prediction_type == "epsilon":
                     target = noise
                 elif noise_scheduler.config.prediction_type == "v_prediction":
-                    target = noise_scheduler.get_velocity(latents, noise, timesteps)
+                    target = noise_scheduler.get_velocity(model_input, noise, timesteps)
                 else:
                     raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
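The training-step hunk above renames `latents` to `model_input` so the same loop covers both latent-space models (with a VAE) and pixel-space models, keeps the optional offset noise, and derives the loss target from the scheduler's `prediction_type`. A self-contained sketch of that noising and target logic, using random tensors and an assumed `DDPMScheduler` configuration instead of the script's real batch, VAE, and `args`:

```python
# Sketch of the forward-diffusion step: sample noise and timesteps, noise the
# input, and pick the regression target for epsilon- or v-prediction training.
import torch
from diffusers import DDPMScheduler

noise_scheduler = DDPMScheduler(num_train_timesteps=1000, prediction_type="v_prediction")

model_input = torch.randn(4, 4, 64, 64)  # stand-in for VAE latents (or pixels when there is no VAE)
noise = torch.randn_like(model_input)

offset_noise = False  # stand-in for args.offset_noise
if offset_noise:
    # Per-channel noise offset helps the model reach very dark/bright images.
    noise = noise + 0.1 * torch.randn(model_input.shape[0], model_input.shape[1], 1, 1)

bsz = model_input.shape[0]
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,)).long()

# Forward diffusion: mix the clean input with noise according to each timestep.
noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)

# The loss target depends on the scheduler's parameterization.
if noise_scheduler.config.prediction_type == "epsilon":
    target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
    target = noise_scheduler.get_velocity(model_input, noise, timesteps)
```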
@@ -997,13 +1269,25 @@ def main(args):
                 global_step += 1
 
                 if accelerator.is_main_process:
+                    images = []
                     if global_step % args.checkpointing_steps == 0:
                         save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
                         accelerator.save_state(save_path)
                         logger.info(f"Saved state to {save_path}")
 
                     if args.validation_prompt is not None and global_step % args.validation_steps == 0:
-                        log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch)
+                        images = log_validation(
+                            text_encoder,
+                            tokenizer,
+                            unet,
+                            vae,
+                            args,
+                            accelerator,
+                            weight_dtype,
+                            epoch,
+                            validation_prompt_encoder_hidden_states,
+                            validation_prompt_negative_prompt_embeds,
+                        )
 
             logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
             progress_bar.set_postfix(**logs)
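The hunk above now collects the validation `images` for the model card and keeps the periodic `accelerator.save_state` checkpointing on the main process. A minimal sketch of that checkpoint cadence, with an assumed output directory and step interval standing in for `args`:

```python
# Sketch: save full training state (models, optimizer, RNG) every N steps.
import os
from accelerate import Accelerator

accelerator = Accelerator()
output_dir, checkpointing_steps = "dreambooth-out", 500  # assumed values

for global_step in range(1, 1001):
    # ... one optimization step would happen here ...
    if accelerator.is_main_process and global_step % checkpointing_steps == 0:
        save_path = os.path.join(output_dir, f"checkpoint-{global_step}")
        accelerator.save_state(save_path)
```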
@@ -1015,15 +1299,46 @@ def main(args):
     # Create the pipeline using using the trained modules and save it.
     accelerator.wait_for_everyone()
     if accelerator.is_main_process:
+        pipeline_args = {}
+
+        if text_encoder is not None:
+            pipeline_args["text_encoder"] = accelerator.unwrap_model(text_encoder)
+
+        if args.skip_save_text_encoder:
+            pipeline_args["text_encoder"] = None
+
         pipeline = DiffusionPipeline.from_pretrained(
             args.pretrained_model_name_or_path,
             unet=accelerator.unwrap_model(unet),
-            text_encoder=accelerator.unwrap_model(text_encoder),
             revision=args.revision,
+            **pipeline_args,
         )
+
+        # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
+        scheduler_args = {}
+
+        if "variance_type" in pipeline.scheduler.config:
+            variance_type = pipeline.scheduler.config.variance_type
+
+            if variance_type in ["learned", "learned_range"]:
+                variance_type = "fixed_small"
+
+            scheduler_args["variance_type"] = variance_type
+
+        pipeline.scheduler = pipeline.scheduler.from_config(pipeline.scheduler.config, **scheduler_args)
+
+        pipeline.save_pretrained(args.output_dir)
 
-        pipeline.save_pretrained(args.output_dir)
-
         if args.push_to_hub:
+            save_model_card(
+                repo_id,
+                images=images,
+                base_model=args.pretrained_model_name_or_path,
+                train_text_encoder=args.train_text_encoder,
+                prompt=args.instance_prompt,
+                repo_folder=args.output_dir,
+                pipeline=pipeline,
+            )
             upload_folder(
                 repo_id=repo_id,
                 folder_path=args.output_dir,
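The hunk above assembles the final pipeline from keyword arguments (so a missing text encoder can simply be omitted) and, when the base scheduler was configured with a learned variance, rewrites its config to a fixed variance before saving, since training only optimizes the simplified (mean-prediction) objective. A small sketch of that config override, using `DDPMScheduler` as a stand-in for whatever scheduler the loaded pipeline actually carries:

```python
# Sketch: replace a learned-variance setting with a fixed one before saving.
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(variance_type="learned_range")  # assumed starting config

scheduler_args = {}
if "variance_type" in scheduler.config:
    variance_type = scheduler.config.variance_type
    if variance_type in ["learned", "learned_range"]:
        variance_type = "fixed_small"
    scheduler_args["variance_type"] = variance_type

# from_config keeps the rest of the config and overrides only the passed keys.
scheduler = DDPMScheduler.from_config(scheduler.config, **scheduler_args)
print(scheduler.config.variance_type)  # -> "fixed_small"
```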
@@ -36,7 +36,7 @@ from diffusers.utils import check_min_version
 
 
 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.16.0")
+check_min_version("0.17.0")
 
 # Cache compiled models across invocations of this script.
 cc.initialize_cache(os.path.expanduser("~/.cache/jax/compilation_cache"))
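This last hunk only bumps the minimum-version pin in a JAX script; the surrounding context lines pin the diffusers version and enable JAX's persistent compilation cache. A minimal sketch of both calls (the cache path mirrors the one in the hunk):

```python
# Sketch: enforce a minimum diffusers version and reuse compiled JAX programs
# across invocations via the persistent compilation cache.
import os
from diffusers.utils import check_min_version
from jax.experimental.compilation_cache import compilation_cache as cc

check_min_version("0.17.0")  # raises if the installed diffusers is older
cc.initialize_cache(os.path.expanduser("~/.cache/jax/compilation_cache"))
```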
Some files were not shown because too many files have changed in this diff.