mirror of
https://github.com/huggingface/diffusers.git
synced 2025-12-10 06:24:19 +08:00
Compare commits
457 Commits
deprecate-
...
custom-blo
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0b6dcf696a | ||
|
|
df8dd77817 | ||
|
|
9f3c0fdcd8 | ||
|
|
84e16575e4 | ||
|
|
55d49d4379 | ||
|
|
40528e9ae7 | ||
|
|
dc622a95d0 | ||
|
|
ecfbc8f952 | ||
|
|
df0e2a4f2c | ||
|
|
303efd2b8d | ||
|
|
5afbcce176 | ||
|
|
6d1a648602 | ||
|
|
250f5cb53d | ||
|
|
dc6bd1511a | ||
|
|
500b9cf184 | ||
|
|
d34b18c783 | ||
|
|
7536f647e4 | ||
|
|
a138d71ec1 | ||
|
|
bc4039886d | ||
|
|
9c3b58dcf1 | ||
|
|
74b5fed434 | ||
|
|
85eb505672 | ||
|
|
ccdd96ca52 | ||
|
|
4c723d8ec3 | ||
|
|
bec2d8eaea | ||
|
|
a0a51eb098 | ||
|
|
a5a0ccf86a | ||
|
|
dd07b19e27 | ||
|
|
57636ad4f4 | ||
|
|
cefc2cf82d | ||
|
|
b3e56e71fb | ||
|
|
5b5fa49a89 | ||
|
|
decfa3c9e1 | ||
|
|
48305755bf | ||
|
|
7853bfbed7 | ||
|
|
23ebbb4bc8 | ||
|
|
1b456bd5d5 | ||
|
|
af769881d3 | ||
|
|
4715c5c769 | ||
|
|
dbe413668d | ||
|
|
26475082cb | ||
|
|
f072c64bf2 | ||
|
|
aed636f5f0 | ||
|
|
53a10518b9 | ||
|
|
b4e6dc3037 | ||
|
|
3eb40786ca | ||
|
|
a4bc845478 | ||
|
|
fa468c5d57 | ||
|
|
8abc7aeb71 | ||
|
|
693d8a3a52 | ||
|
|
a9df12ab45 | ||
|
|
a519272d97 | ||
|
|
345864eb85 | ||
|
|
35e538d46a | ||
|
|
2dc31677e1 | ||
|
|
1066de8c69 | ||
|
|
2d69bacb00 | ||
|
|
0974b4c606 | ||
|
|
cf4b97b233 | ||
|
|
7f3e9b8695 | ||
|
|
ce90f9b2db | ||
|
|
c3675d4c9b | ||
|
|
2b7deffe36 | ||
|
|
941ac9c3d9 | ||
|
|
7242b5ff62 | ||
|
|
b4297967a0 | ||
|
|
9ae5b6299d | ||
|
|
814d710e56 | ||
|
|
cc5b31ffc9 | ||
|
|
d7a1a0363f | ||
|
|
b59654544b | ||
|
|
0e12ba7454 | ||
|
|
20fd00b14b | ||
|
|
76d4e416bc | ||
|
|
c07fcf780a | ||
|
|
ccedeca96e | ||
|
|
64a5187d96 | ||
|
|
0a151115bb | ||
|
|
19085ac8f4 | ||
|
|
041501aea9 | ||
|
|
9c0944581a | ||
|
|
4588bbeb42 | ||
|
|
ec5449f3a1 | ||
|
|
310fdaf556 | ||
|
|
dcb6dd9b7a | ||
|
|
043ab2520f | ||
|
|
351b2f172a | ||
|
|
08c29020dd | ||
|
|
7a58734994 | ||
|
|
9ef118509e | ||
|
|
7c54a7b38a | ||
|
|
09e777a3e1 | ||
|
|
a72bc0c4bb | ||
|
|
80de641c1c | ||
|
|
76810eca2b | ||
|
|
1448b03585 | ||
|
|
5796735015 | ||
|
|
d8310a8fca | ||
|
|
78031c2938 | ||
|
|
d83d35c1bb | ||
|
|
843355f89f | ||
|
|
c006a95df1 | ||
|
|
df267ee4e8 | ||
|
|
edd614ea38 | ||
|
|
7e7e62c6ff | ||
|
|
eda9ff8300 | ||
|
|
efb7a299af | ||
|
|
d06750a5fd | ||
|
|
8c72cd12ee | ||
|
|
751e250f70 | ||
|
|
b50014067d | ||
|
|
f5c113e439 | ||
|
|
5e181eddfe | ||
|
|
55f0b3d758 | ||
|
|
eb7ef26736 | ||
|
|
e1b7f1f240 | ||
|
|
9e7ae568d6 | ||
|
|
f7b79452b4 | ||
|
|
43459079ab | ||
|
|
4067d6c4b6 | ||
|
|
28106fcac4 | ||
|
|
c222570a9b | ||
|
|
4e36bb0d23 | ||
|
|
f50b18eec7 | ||
|
|
fc337d5853 | ||
|
|
32798bf242 | ||
|
|
c2e5ece08b | ||
|
|
764b62473a | ||
|
|
6682956333 | ||
|
|
ffc8c0c1e1 | ||
|
|
4acbfbf13b | ||
|
|
6549b04ec6 | ||
|
|
130fd8df54 | ||
|
|
bcd4d77ba6 | ||
|
|
006d092751 | ||
|
|
9e4a75b142 | ||
|
|
0ff1aa910c | ||
|
|
901da9dccc | ||
|
|
67ffa7031e | ||
|
|
827fad66a0 | ||
|
|
9b721db205 | ||
|
|
ba0e732eb0 | ||
|
|
b2da59b197 | ||
|
|
7aa6af1138 | ||
|
|
87b800e154 | ||
|
|
e58711e73c | ||
|
|
cbecc33570 | ||
|
|
5237a82a35 | ||
|
|
513dbdb2f3 | ||
|
|
865ba102b3 | ||
|
|
552c127c05 | ||
|
|
4b7fe044e3 | ||
|
|
532f41c999 | ||
|
|
5fcd5f560f | ||
|
|
0fd7ee79ea | ||
|
|
0d1c5b0c3e | ||
|
|
0e46c55931 | ||
|
|
8f8888a76e | ||
|
|
afc9721898 | ||
|
|
2c4ee10b77 | ||
|
|
cf1ca728ea | ||
|
|
144e6e2540 | ||
|
|
22b229ba66 | ||
|
|
a840c39ad8 | ||
|
|
9a7ae77a4e | ||
|
|
673d4357ff | ||
|
|
561ab54de3 | ||
|
|
b60faf456b | ||
|
|
3e73dc24a4 | ||
|
|
d03240801f | ||
|
|
e62804ffbd | ||
|
|
bb1d9a8b75 | ||
|
|
91a151b5c6 | ||
|
|
4fcd0bc7eb | ||
|
|
7993be9e7f | ||
|
|
7a2b78bf0f | ||
|
|
f868d4b58b | ||
|
|
cc48b9368f | ||
|
|
dba4e007fe | ||
|
|
8d1de40891 | ||
|
|
8cc528c5e7 | ||
|
|
3c50f0cdad | ||
|
|
555b6cc34f | ||
|
|
5b53f67f06 | ||
|
|
9918d13eba | ||
|
|
e824660436 | ||
|
|
03be15e890 | ||
|
|
85cbe589a7 | ||
|
|
4d9b82297f | ||
|
|
76c809e2ef | ||
|
|
e682af2027 | ||
|
|
a58a4f665b | ||
|
|
8701e8644b | ||
|
|
58bf268261 | ||
|
|
1b48db4c8f | ||
|
|
46a0c6aa82 | ||
|
|
421ee07e33 | ||
|
|
123506ee59 | ||
|
|
8c48ec05ed | ||
|
|
a6d2fc2c1d | ||
|
|
bc2762cce9 | ||
|
|
baa9b582f3 | ||
|
|
da096a4999 | ||
|
|
480fb357a3 | ||
|
|
38740ddbd8 | ||
|
|
72282876b2 | ||
|
|
3552279a23 | ||
|
|
f8ba5cd77a | ||
|
|
c9c8217306 | ||
|
|
135df5be9d | ||
|
|
4a9dbd56f6 | ||
|
|
630d27fe5b | ||
|
|
f442955c6e | ||
|
|
ff9a387618 | ||
|
|
03c3f69aa5 | ||
|
|
f20aba3e87 | ||
|
|
ccf2c31188 | ||
|
|
7b10e4ae65 | ||
|
|
3c0531bc50 | ||
|
|
a8e47978c6 | ||
|
|
50e18ee698 | ||
|
|
4b17fa2a2e | ||
|
|
d45199a2f1 | ||
|
|
061163142d | ||
|
|
5780776c8a | ||
|
|
f19421e27c | ||
|
|
69cdc25746 | ||
|
|
cfd6ec7465 | ||
|
|
1082c46afa | ||
|
|
ba2ba9019f | ||
|
|
fa4c0e5e2e | ||
|
|
b793debd9d | ||
|
|
377057126c | ||
|
|
5937e11d85 | ||
|
|
9c1d4e3be1 | ||
|
|
7ea065c507 | ||
|
|
7a7a487396 | ||
|
|
4efb4db9d0 | ||
|
|
639fd12a20 | ||
|
|
69a9828f4d | ||
|
|
11d22e0e80 | ||
|
|
9a38fab5ae | ||
|
|
cb8e61ed2f | ||
|
|
8e53cd959e | ||
|
|
359b605f4b | ||
|
|
6febc08bfc | ||
|
|
9a2eaed002 | ||
|
|
0c71189abe | ||
|
|
58d2b10a2e | ||
|
|
20e0740b88 | ||
|
|
9d313fc718 | ||
|
|
f83dd5c984 | ||
|
|
c052791b5f | ||
|
|
843e3f9346 | ||
|
|
d8854b8d54 | ||
|
|
327e251b81 | ||
|
|
dfa48831e2 | ||
|
|
94df8ef68a | ||
|
|
203dc520a7 | ||
|
|
56d4387270 | ||
|
|
edcbe8038b | ||
|
|
c02c4a6d27 | ||
|
|
6f3ac3050f | ||
|
|
a6d9f6a1a9 | ||
|
|
284150449d | ||
|
|
3d2f8ae99b | ||
|
|
f36ba9f094 | ||
|
|
1c50a5f7e0 | ||
|
|
7ae6347e33 | ||
|
|
178d32dedd | ||
|
|
ef1e628729 | ||
|
|
173e1b147d | ||
|
|
e46e139f95 | ||
|
|
14725164be | ||
|
|
638cc035e5 | ||
|
|
9db9be65f3 | ||
|
|
d87134ada4 | ||
|
|
67a8ec8bf5 | ||
|
|
cde02b061b | ||
|
|
5dc503aa28 | ||
|
|
c6fbcf717b | ||
|
|
b9e99654e1 | ||
|
|
478df933c3 | ||
|
|
18c8f10f20 | ||
|
|
7298bdd817 | ||
|
|
9c13f86579 | ||
|
|
5c5209720e | ||
|
|
aa14f090f8 | ||
|
|
c5d6e0b537 | ||
|
|
39831599f1 | ||
|
|
b73c738392 | ||
|
|
06fd427797 | ||
|
|
48a551251d | ||
|
|
6398fbc391 | ||
|
|
3c8b67b371 | ||
|
|
9feb946432 | ||
|
|
c90352754a | ||
|
|
7a935a0bbe | ||
|
|
941b7fc084 | ||
|
|
76a62ac9cc | ||
|
|
1c6ab9e900 | ||
|
|
265840a098 | ||
|
|
9f4d997d8f | ||
|
|
b41abb2230 | ||
|
|
f33b89bafb | ||
|
|
48a6d29550 | ||
|
|
2d3d376bc0 | ||
|
|
db715e2c8c | ||
|
|
754fe85cac | ||
|
|
cc1f9a2ce3 | ||
|
|
737d7fc3b0 | ||
|
|
be23f7df00 | ||
|
|
86becea77f | ||
|
|
7e3bf4aff6 | ||
|
|
de043c6044 | ||
|
|
4c20624cc6 | ||
|
|
0454fbb30b | ||
|
|
cbc8ced20f | ||
|
|
01240fecb0 | ||
|
|
ce338d4e4a | ||
|
|
bc55b631fd | ||
|
|
15d50f16f2 | ||
|
|
2c30287958 | ||
|
|
425a715e35 | ||
|
|
2527917528 | ||
|
|
e6639fef70 | ||
|
|
8c938fb410 | ||
|
|
f864a9a352 | ||
|
|
d6fa3298fa | ||
|
|
6f1d6694df | ||
|
|
0e95aa853e | ||
|
|
5ef74fd5f6 | ||
|
|
64a9210315 | ||
|
|
d31b8cea3e | ||
|
|
62e847db5f | ||
|
|
470458623e | ||
|
|
a79c3af6bb | ||
|
|
3f3f0c16a6 | ||
|
|
f3e1310469 | ||
|
|
87f83d3dd9 | ||
|
|
f064b3bf73 | ||
|
|
3b079ec3fa | ||
|
|
bc34fa8386 | ||
|
|
05e7a854d0 | ||
|
|
76ec3d1fee | ||
|
|
cdaf84a708 | ||
|
|
e8e44a510c | ||
|
|
21543de571 | ||
|
|
d7dd924ece | ||
|
|
00f95b9755 | ||
|
|
eea76892e8 | ||
|
|
27bf7fcd0e | ||
|
|
a185e1ab91 | ||
|
|
d93381cd41 | ||
|
|
3649d7b903 | ||
|
|
10c36e0b78 | ||
|
|
8846635873 | ||
|
|
dd285099eb | ||
|
|
80f27d7e8d | ||
|
|
d3e27e05f0 | ||
|
|
5df02fc171 | ||
|
|
7392c8ff5a | ||
|
|
474a248f10 | ||
|
|
7bc0a07b19 | ||
|
|
92542719ed | ||
|
|
6760300202 | ||
|
|
798265f2b6 | ||
|
|
cd813499be | ||
|
|
fbddf02807 | ||
|
|
f20b83a04f | ||
|
|
ee40088fe5 | ||
|
|
7fc53b5d66 | ||
|
|
0874dd04dc | ||
|
|
6184d8a433 | ||
|
|
5a6e386464 | ||
|
|
42077e6c73 | ||
|
|
3d8d8485fc | ||
|
|
195926bbdc | ||
|
|
85a916bb8b | ||
|
|
3287ce2890 | ||
|
|
0c11c8c1ac | ||
|
|
fc51583c8a | ||
|
|
fb57c76aa1 | ||
|
|
7251bb4fd0 | ||
|
|
3fba74e153 | ||
|
|
a4df8dbc40 | ||
|
|
48eae6f420 | ||
|
|
66394bf6c7 | ||
|
|
62cce3045d | ||
|
|
05e867784d | ||
|
|
d72184eba3 | ||
|
|
5ce4814af1 | ||
|
|
1bc6f3dc0f | ||
|
|
79bd7ecc78 | ||
|
|
9b834f8710 | ||
|
|
81426b0f19 | ||
|
|
f0dba33d82 | ||
|
|
d1db4f853a | ||
|
|
8adc6003ba | ||
|
|
9f91305f85 | ||
|
|
368958df6f | ||
|
|
e52ceae375 | ||
|
|
62cbde8d41 | ||
|
|
648e8955cf | ||
|
|
00b179fb1a | ||
|
|
47ef79464f | ||
|
|
b272807bc8 | ||
|
|
447ccd0679 | ||
|
|
f3e09114f2 | ||
|
|
91545666e0 | ||
|
|
b6f7933044 | ||
|
|
33e636cea5 | ||
|
|
e27142ac64 | ||
|
|
8e88495da2 | ||
|
|
b79803fe08 | ||
|
|
b0f7036d9a | ||
|
|
6c7fad7ec8 | ||
|
|
5b0dab1253 | ||
|
|
7c6e9ef425 | ||
|
|
f46abfe4ce | ||
|
|
73a9d5856f | ||
|
|
16c955c5fd | ||
|
|
0f91f2f6fc | ||
|
|
745199a869 | ||
|
|
0142f6f35a | ||
|
|
d04cd95012 | ||
|
|
c934720629 | ||
|
|
9f48394bf7 | ||
|
|
20273e5503 | ||
|
|
d4dc4d7654 | ||
|
|
3a31b291f1 | ||
|
|
b975bceff3 | ||
|
|
8183d0f16e | ||
|
|
6508da6f06 | ||
|
|
d0ec6601df | ||
|
|
a7aa8bf28a | ||
|
|
3651bdb766 | ||
|
|
df55f05358 | ||
|
|
89ddb6c0a4 | ||
|
|
be2fb77dc1 | ||
|
|
54cddc1e12 | ||
|
|
28ef0165b9 | ||
|
|
a4da216125 | ||
|
|
5939ace91b | ||
|
|
cc59505e26 | ||
|
|
5f5d02fbf1 | ||
|
|
53748217e6 | ||
|
|
826f43505d | ||
|
|
4af76d0d7d | ||
|
|
b5c2050a16 | ||
|
|
7ae546f8d1 | ||
|
|
f64fa9492d | ||
|
|
049082e013 | ||
|
|
f161e277d0 | ||
|
|
a5f4cc7f84 | ||
|
|
c36f8487df | ||
|
|
54af3ca7fd |
50
.github/workflows/benchmark.yml
vendored
50
.github/workflows/benchmark.yml
vendored
@@ -7,24 +7,25 @@ on:
|
|||||||
|
|
||||||
env:
|
env:
|
||||||
DIFFUSERS_IS_CI: yes
|
DIFFUSERS_IS_CI: yes
|
||||||
HF_HUB_ENABLE_HF_TRANSFER: 1
|
HF_XET_HIGH_PERFORMANCE: 1
|
||||||
HF_HOME: /mnt/cache
|
HF_HOME: /mnt/cache
|
||||||
OMP_NUM_THREADS: 8
|
OMP_NUM_THREADS: 8
|
||||||
MKL_NUM_THREADS: 8
|
MKL_NUM_THREADS: 8
|
||||||
|
BASE_PATH: benchmark_outputs
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
torch_pipelines_cuda_benchmark_tests:
|
torch_models_cuda_benchmark_tests:
|
||||||
env:
|
env:
|
||||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_BENCHMARK }}
|
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_BENCHMARK }}
|
||||||
name: Torch Core Pipelines CUDA Benchmarking Tests
|
name: Torch Core Models CUDA Benchmarking Tests
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
max-parallel: 1
|
max-parallel: 1
|
||||||
runs-on:
|
runs-on:
|
||||||
group: aws-g6-4xlarge-plus
|
group: aws-g6e-4xlarge
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-compile-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --shm-size "16gb" --ipc host --gpus 0
|
options: --shm-size "16gb" --ipc host --gpus all
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -35,27 +36,46 @@ jobs:
|
|||||||
nvidia-smi
|
nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
apt update
|
||||||
python -m uv pip install -e [quality,test]
|
apt install -y libpq-dev postgresql-client
|
||||||
python -m uv pip install pandas peft
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip uninstall transformers && python -m uv pip install transformers==4.48.0
|
uv pip install -r benchmarks/requirements.txt
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
- name: Diffusers Benchmarking
|
- name: Diffusers Benchmarking
|
||||||
env:
|
env:
|
||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_BOT_TOKEN }}
|
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
||||||
BASE_PATH: benchmark_outputs
|
|
||||||
run: |
|
run: |
|
||||||
export TOTAL_GPU_MEMORY=$(python -c "import torch; print(torch.cuda.get_device_properties(0).total_memory / (1024**3))")
|
cd benchmarks && python run_all.py
|
||||||
cd benchmarks && mkdir ${BASE_PATH} && python run_all.py && python push_results.py
|
|
||||||
|
- name: Push results to the Hub
|
||||||
|
env:
|
||||||
|
HF_TOKEN: ${{ secrets.DIFFUSERS_BOT_TOKEN }}
|
||||||
|
run: |
|
||||||
|
cd benchmarks && python push_results.py
|
||||||
|
mkdir $BASE_PATH && cp *.csv $BASE_PATH
|
||||||
|
|
||||||
- name: Test suite reports artifacts
|
- name: Test suite reports artifacts
|
||||||
if: ${{ always() }}
|
if: ${{ always() }}
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: benchmark_test_reports
|
name: benchmark_test_reports
|
||||||
path: benchmarks/benchmark_outputs
|
path: benchmarks/${{ env.BASE_PATH }}
|
||||||
|
|
||||||
|
# TODO: enable this once the connection problem has been resolved.
|
||||||
|
- name: Update benchmarking results to DB
|
||||||
|
env:
|
||||||
|
PGDATABASE: metrics
|
||||||
|
PGHOST: ${{ secrets.DIFFUSERS_BENCHMARKS_PGHOST }}
|
||||||
|
PGUSER: transformers_benchmarks
|
||||||
|
PGPASSWORD: ${{ secrets.DIFFUSERS_BENCHMARKS_PGPASSWORD }}
|
||||||
|
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
|
||||||
|
run: |
|
||||||
|
git config --global --add safe.directory /__w/diffusers/diffusers
|
||||||
|
commit_id=$GITHUB_SHA
|
||||||
|
commit_msg=$(git show -s --format=%s "$commit_id" | cut -c1-70)
|
||||||
|
cd benchmarks && python populate_into_db.py "$BRANCH_NAME" "$commit_id" "$commit_msg"
|
||||||
|
|
||||||
- name: Report success status
|
- name: Report success status
|
||||||
if: ${{ success() }}
|
if: ${{ success() }}
|
||||||
|
|||||||
45
.github/workflows/build_docker_images.yml
vendored
45
.github/workflows/build_docker_images.yml
vendored
@@ -38,15 +38,43 @@ jobs:
|
|||||||
token: ${{ secrets.GITHUB_TOKEN }}
|
token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Build Changed Docker Images
|
- name: Build Changed Docker Images
|
||||||
|
env:
|
||||||
|
CHANGED_FILES: ${{ steps.file_changes.outputs.all }}
|
||||||
run: |
|
run: |
|
||||||
CHANGED_FILES="${{ steps.file_changes.outputs.all }}"
|
echo "$CHANGED_FILES"
|
||||||
|
ALLOWED_IMAGES=(
|
||||||
|
diffusers-pytorch-cpu
|
||||||
|
diffusers-pytorch-cuda
|
||||||
|
diffusers-pytorch-xformers-cuda
|
||||||
|
diffusers-pytorch-minimum-cuda
|
||||||
|
diffusers-doc-builder
|
||||||
|
)
|
||||||
|
|
||||||
|
declare -A IMAGES_TO_BUILD=()
|
||||||
|
|
||||||
for FILE in $CHANGED_FILES; do
|
for FILE in $CHANGED_FILES; do
|
||||||
if [[ "$FILE" == docker/*Dockerfile ]]; then
|
# skip anything that isn't still on disk
|
||||||
DOCKER_PATH="${FILE%/Dockerfile}"
|
if [[ ! -e "$FILE" ]]; then
|
||||||
DOCKER_TAG=$(basename "$DOCKER_PATH")
|
echo "Skipping removed file $FILE"
|
||||||
echo "Building Docker image for $DOCKER_TAG"
|
continue
|
||||||
docker build -t "$DOCKER_TAG" "$DOCKER_PATH"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
for IMAGE in "${ALLOWED_IMAGES[@]}"; do
|
||||||
|
if [[ "$FILE" == docker/${IMAGE}/* ]]; then
|
||||||
|
IMAGES_TO_BUILD["$IMAGE"]=1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
done
|
||||||
|
|
||||||
|
if [[ ${#IMAGES_TO_BUILD[@]} -eq 0 ]]; then
|
||||||
|
echo "No relevant Docker changes detected."
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
for IMAGE in "${!IMAGES_TO_BUILD[@]}"; do
|
||||||
|
DOCKER_PATH="docker/${IMAGE}"
|
||||||
|
echo "Building Docker image for $IMAGE"
|
||||||
|
docker build -t "$IMAGE" "$DOCKER_PATH"
|
||||||
done
|
done
|
||||||
if: steps.file_changes.outputs.all != ''
|
if: steps.file_changes.outputs.all != ''
|
||||||
|
|
||||||
@@ -65,13 +93,8 @@ jobs:
|
|||||||
image-name:
|
image-name:
|
||||||
- diffusers-pytorch-cpu
|
- diffusers-pytorch-cpu
|
||||||
- diffusers-pytorch-cuda
|
- diffusers-pytorch-cuda
|
||||||
- diffusers-pytorch-compile-cuda
|
|
||||||
- diffusers-pytorch-xformers-cuda
|
- diffusers-pytorch-xformers-cuda
|
||||||
- diffusers-pytorch-minimum-cuda
|
- diffusers-pytorch-minimum-cuda
|
||||||
- diffusers-flax-cpu
|
|
||||||
- diffusers-flax-tpu
|
|
||||||
- diffusers-onnxruntime-cpu
|
|
||||||
- diffusers-onnxruntime-cuda
|
|
||||||
- diffusers-doc-builder
|
- diffusers-doc-builder
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
|
|||||||
26
.github/workflows/build_pr_documentation.yml
vendored
26
.github/workflows/build_pr_documentation.yml
vendored
@@ -12,7 +12,33 @@ concurrency:
|
|||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
check-links:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: '3.10'
|
||||||
|
|
||||||
|
- name: Install uv
|
||||||
|
run: |
|
||||||
|
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||||
|
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||||
|
|
||||||
|
- name: Install doc-builder
|
||||||
|
run: |
|
||||||
|
uv pip install --system git+https://github.com/huggingface/doc-builder.git@main
|
||||||
|
|
||||||
|
- name: Check documentation links
|
||||||
|
run: |
|
||||||
|
uv run doc-builder check-links docs/source/en
|
||||||
|
|
||||||
build:
|
build:
|
||||||
|
needs: check-links
|
||||||
uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
|
uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
|
||||||
with:
|
with:
|
||||||
commit_sha: ${{ github.event.pull_request.head.sha }}
|
commit_sha: ${{ github.event.pull_request.head.sha }}
|
||||||
|
|||||||
@@ -74,19 +74,19 @@ jobs:
|
|||||||
python-version: "3.10"
|
python-version: "3.10"
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m pip install --upgrade pip
|
pip install --upgrade pip
|
||||||
pip install --upgrade huggingface_hub
|
pip install --upgrade huggingface_hub
|
||||||
|
|
||||||
# Check secret is set
|
# Check secret is set
|
||||||
- name: whoami
|
- name: whoami
|
||||||
run: huggingface-cli whoami
|
run: hf auth whoami
|
||||||
env:
|
env:
|
||||||
HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
|
HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
|
||||||
|
|
||||||
# Push to HF! (under subfolder based on checkout ref)
|
# Push to HF! (under subfolder based on checkout ref)
|
||||||
# https://huggingface.co/datasets/diffusers/community-pipelines-mirror
|
# https://huggingface.co/datasets/diffusers/community-pipelines-mirror
|
||||||
- name: Mirror community pipeline to HF
|
- name: Mirror community pipeline to HF
|
||||||
run: huggingface-cli upload diffusers/community-pipelines-mirror ./examples/community ${PATH_IN_REPO} --repo-type dataset
|
run: hf upload diffusers/community-pipelines-mirror ./examples/community ${PATH_IN_REPO} --repo-type dataset
|
||||||
env:
|
env:
|
||||||
PATH_IN_REPO: ${{ env.PATH_IN_REPO }}
|
PATH_IN_REPO: ${{ env.PATH_IN_REPO }}
|
||||||
HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
|
HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
|
||||||
|
|||||||
327
.github/workflows/nightly_tests.yml
vendored
327
.github/workflows/nightly_tests.yml
vendored
@@ -7,14 +7,15 @@ on:
|
|||||||
|
|
||||||
env:
|
env:
|
||||||
DIFFUSERS_IS_CI: yes
|
DIFFUSERS_IS_CI: yes
|
||||||
HF_HUB_ENABLE_HF_TRANSFER: 1
|
HF_XET_HIGH_PERFORMANCE: 1
|
||||||
OMP_NUM_THREADS: 8
|
OMP_NUM_THREADS: 8
|
||||||
MKL_NUM_THREADS: 8
|
MKL_NUM_THREADS: 8
|
||||||
PYTEST_TIMEOUT: 600
|
PYTEST_TIMEOUT: 600
|
||||||
RUN_SLOW: yes
|
RUN_SLOW: yes
|
||||||
RUN_NIGHTLY: yes
|
RUN_NIGHTLY: yes
|
||||||
PIPELINE_USAGE_CUTOFF: 5000
|
PIPELINE_USAGE_CUTOFF: 0
|
||||||
SLACK_API_TOKEN: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
SLACK_API_TOKEN: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||||
|
CONSOLIDATED_REPORT_PATH: consolidated_test_report.md
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
setup_torch_cuda_pipeline_matrix:
|
setup_torch_cuda_pipeline_matrix:
|
||||||
@@ -60,7 +61,7 @@ jobs:
|
|||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --shm-size "16gb" --ipc host --gpus 0
|
options: --shm-size "16gb" --ipc host --gpus all
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -70,10 +71,9 @@ jobs:
|
|||||||
run: nvidia-smi
|
run: nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
uv pip install pytest-reportlog
|
||||||
python -m uv pip install pytest-reportlog
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -83,7 +83,7 @@ jobs:
|
|||||||
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v -k "not Flax and not Onnx" \
|
-s -v -k "not Flax and not Onnx" \
|
||||||
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
|
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
|
||||||
--report-log=tests_pipeline_${{ matrix.module }}_cuda.log \
|
--report-log=tests_pipeline_${{ matrix.module }}_cuda.log \
|
||||||
@@ -99,11 +99,6 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
name: pipeline_${{ matrix.module }}_test_reports
|
name: pipeline_${{ matrix.module }}_test_reports
|
||||||
path: reports
|
path: reports
|
||||||
- name: Generate Report and Notify Channel
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
pip install slack_sdk tabulate
|
|
||||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
|
||||||
|
|
||||||
run_nightly_tests_for_other_torch_modules:
|
run_nightly_tests_for_other_torch_modules:
|
||||||
name: Nightly Torch CUDA Tests
|
name: Nightly Torch CUDA Tests
|
||||||
@@ -111,7 +106,7 @@ jobs:
|
|||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --shm-size "16gb" --ipc host --gpus 0
|
options: --shm-size "16gb" --ipc host --gpus all
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -128,11 +123,10 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip install peft@git+https://github.com/huggingface/peft.git
|
||||||
python -m uv pip install peft@git+https://github.com/huggingface/peft.git
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
uv pip install pytest-reportlog
|
||||||
python -m uv pip install pytest-reportlog
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: python utils/print_env.py
|
run: python utils/print_env.py
|
||||||
|
|
||||||
@@ -142,9 +136,8 @@ jobs:
|
|||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
||||||
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
RUN_COMPILE: yes
|
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v -k "not Flax and not Onnx" \
|
-s -v -k "not Flax and not Onnx" \
|
||||||
--make-reports=tests_torch_${{ matrix.module }}_cuda \
|
--make-reports=tests_torch_${{ matrix.module }}_cuda \
|
||||||
--report-log=tests_torch_${{ matrix.module }}_cuda.log \
|
--report-log=tests_torch_${{ matrix.module }}_cuda.log \
|
||||||
@@ -157,7 +150,7 @@ jobs:
|
|||||||
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v --make-reports=examples_torch_cuda \
|
-s -v --make-reports=examples_torch_cuda \
|
||||||
--report-log=examples_torch_cuda.log \
|
--report-log=examples_torch_cuda.log \
|
||||||
examples/
|
examples/
|
||||||
@@ -175,12 +168,6 @@ jobs:
|
|||||||
name: torch_${{ matrix.module }}_cuda_test_reports
|
name: torch_${{ matrix.module }}_cuda_test_reports
|
||||||
path: reports
|
path: reports
|
||||||
|
|
||||||
- name: Generate Report and Notify Channel
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
pip install slack_sdk tabulate
|
|
||||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
|
||||||
|
|
||||||
run_torch_compile_tests:
|
run_torch_compile_tests:
|
||||||
name: PyTorch Compile CUDA tests
|
name: PyTorch Compile CUDA tests
|
||||||
|
|
||||||
@@ -188,8 +175,8 @@ jobs:
|
|||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
|
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-compile-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --gpus 0 --shm-size "16gb" --ipc host
|
options: --gpus all --shm-size "16gb" --ipc host
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
@@ -202,8 +189,7 @@ jobs:
|
|||||||
nvidia-smi
|
nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality,training]"
|
||||||
python -m uv pip install -e [quality,test,training]
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -212,7 +198,7 @@ jobs:
|
|||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
||||||
RUN_COMPILE: yes
|
RUN_COMPILE: yes
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
|
||||||
- name: Failure short reports
|
- name: Failure short reports
|
||||||
if: ${{ failure() }}
|
if: ${{ failure() }}
|
||||||
run: cat reports/tests_torch_compile_cuda_failures_short.txt
|
run: cat reports/tests_torch_compile_cuda_failures_short.txt
|
||||||
@@ -224,12 +210,6 @@ jobs:
|
|||||||
name: torch_compile_test_reports
|
name: torch_compile_test_reports
|
||||||
path: reports
|
path: reports
|
||||||
|
|
||||||
- name: Generate Report and Notify Channel
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
pip install slack_sdk tabulate
|
|
||||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
|
||||||
|
|
||||||
run_big_gpu_torch_tests:
|
run_big_gpu_torch_tests:
|
||||||
name: Torch tests on big GPU
|
name: Torch tests on big GPU
|
||||||
strategy:
|
strategy:
|
||||||
@@ -239,7 +219,7 @@ jobs:
|
|||||||
group: aws-g6e-xlarge-plus
|
group: aws-g6e-xlarge-plus
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --shm-size "16gb" --ipc host --gpus 0
|
options: --shm-size "16gb" --ipc host --gpus all
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -249,11 +229,10 @@ jobs:
|
|||||||
run: nvidia-smi
|
run: nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip install peft@git+https://github.com/huggingface/peft.git
|
||||||
python -m uv pip install peft@git+https://github.com/huggingface/peft.git
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
uv pip install pytest-reportlog
|
||||||
python -m uv pip install pytest-reportlog
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -264,8 +243,8 @@ jobs:
|
|||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
BIG_GPU_MEMORY: 40
|
BIG_GPU_MEMORY: 40
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-m "big_gpu_with_torch_cuda" \
|
-m "big_accelerator" \
|
||||||
--make-reports=tests_big_gpu_torch_cuda \
|
--make-reports=tests_big_gpu_torch_cuda \
|
||||||
--report-log=tests_big_gpu_torch_cuda.log \
|
--report-log=tests_big_gpu_torch_cuda.log \
|
||||||
tests/
|
tests/
|
||||||
@@ -280,19 +259,14 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
name: torch_cuda_big_gpu_test_reports
|
name: torch_cuda_big_gpu_test_reports
|
||||||
path: reports
|
path: reports
|
||||||
- name: Generate Report and Notify Channel
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
pip install slack_sdk tabulate
|
|
||||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
|
||||||
|
|
||||||
torch_minimum_version_cuda_tests:
|
torch_minimum_version_cuda_tests:
|
||||||
name: Torch Minimum Version CUDA Tests
|
name: Torch Minimum Version CUDA Tests
|
||||||
runs-on:
|
runs-on:
|
||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-minimum-cuda
|
image: diffusers/diffusers-pytorch-minimum-cuda
|
||||||
options: --shm-size "16gb" --ipc host --gpus 0
|
options: --shm-size "16gb" --ipc host --gpus all
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -304,10 +278,9 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip install peft@git+https://github.com/huggingface/peft.git
|
||||||
python -m uv pip install peft@git+https://github.com/huggingface/peft.git
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
@@ -319,7 +292,7 @@ jobs:
|
|||||||
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v -k "not Flax and not Onnx" \
|
-s -v -k "not Flax and not Onnx" \
|
||||||
--make-reports=tests_torch_minimum_version_cuda \
|
--make-reports=tests_torch_minimum_version_cuda \
|
||||||
tests/models/test_modeling_common.py \
|
tests/models/test_modeling_common.py \
|
||||||
@@ -342,143 +315,34 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
name: torch_minimum_version_cuda_test_reports
|
name: torch_minimum_version_cuda_test_reports
|
||||||
path: reports
|
path: reports
|
||||||
|
|
||||||
run_flax_tpu_tests:
|
|
||||||
name: Nightly Flax TPU Tests
|
|
||||||
runs-on:
|
|
||||||
group: gcp-ct5lp-hightpu-8t
|
|
||||||
if: github.event_name == 'schedule'
|
|
||||||
|
|
||||||
container:
|
|
||||||
image: diffusers/diffusers-flax-tpu
|
|
||||||
options: --shm-size "16gb" --ipc host --privileged ${{ vars.V5_LITEPOD_8_ENV}} -v /mnt/hf_cache:/mnt/hf_cache
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
steps:
|
|
||||||
- name: Checkout diffusers
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 2
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: |
|
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
python -m uv pip install pytest-reportlog
|
|
||||||
|
|
||||||
- name: Environment
|
|
||||||
run: python utils/print_env.py
|
|
||||||
|
|
||||||
- name: Run nightly Flax TPU tests
|
|
||||||
env:
|
|
||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
|
||||||
run: |
|
|
||||||
python -m pytest -n 0 \
|
|
||||||
-s -v -k "Flax" \
|
|
||||||
--make-reports=tests_flax_tpu \
|
|
||||||
--report-log=tests_flax_tpu.log \
|
|
||||||
tests/
|
|
||||||
|
|
||||||
- name: Failure short reports
|
|
||||||
if: ${{ failure() }}
|
|
||||||
run: |
|
|
||||||
cat reports/tests_flax_tpu_stats.txt
|
|
||||||
cat reports/tests_flax_tpu_failures_short.txt
|
|
||||||
|
|
||||||
- name: Test suite reports artifacts
|
|
||||||
if: ${{ always() }}
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: flax_tpu_test_reports
|
|
||||||
path: reports
|
|
||||||
|
|
||||||
- name: Generate Report and Notify Channel
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
pip install slack_sdk tabulate
|
|
||||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
|
||||||
|
|
||||||
run_nightly_onnx_tests:
|
|
||||||
name: Nightly ONNXRuntime CUDA tests on Ubuntu
|
|
||||||
runs-on:
|
|
||||||
group: aws-g4dn-2xlarge
|
|
||||||
container:
|
|
||||||
image: diffusers/diffusers-onnxruntime-cuda
|
|
||||||
options: --gpus 0 --shm-size "16gb" --ipc host
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout diffusers
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 2
|
|
||||||
|
|
||||||
- name: NVIDIA-SMI
|
|
||||||
run: nvidia-smi
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: |
|
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
python -m uv pip install pytest-reportlog
|
|
||||||
- name: Environment
|
|
||||||
run: python utils/print_env.py
|
|
||||||
|
|
||||||
- name: Run Nightly ONNXRuntime CUDA tests
|
|
||||||
env:
|
|
||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
|
||||||
run: |
|
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
|
||||||
-s -v -k "Onnx" \
|
|
||||||
--make-reports=tests_onnx_cuda \
|
|
||||||
--report-log=tests_onnx_cuda.log \
|
|
||||||
tests/
|
|
||||||
|
|
||||||
- name: Failure short reports
|
|
||||||
if: ${{ failure() }}
|
|
||||||
run: |
|
|
||||||
cat reports/tests_onnx_cuda_stats.txt
|
|
||||||
cat reports/tests_onnx_cuda_failures_short.txt
|
|
||||||
|
|
||||||
- name: Test suite reports artifacts
|
|
||||||
if: ${{ always() }}
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: tests_onnx_cuda_reports
|
|
||||||
path: reports
|
|
||||||
|
|
||||||
- name: Generate Report and Notify Channel
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
pip install slack_sdk tabulate
|
|
||||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
|
||||||
|
|
||||||
run_nightly_quantization_tests:
|
run_nightly_quantization_tests:
|
||||||
name: Torch quantization nightly tests
|
name: Torch quantization nightly tests
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
max-parallel: 2
|
max-parallel: 2
|
||||||
matrix:
|
matrix:
|
||||||
config:
|
config:
|
||||||
- backend: "bitsandbytes"
|
- backend: "bitsandbytes"
|
||||||
test_location: "bnb"
|
test_location: "bnb"
|
||||||
additional_deps: ["peft"]
|
additional_deps: ["peft"]
|
||||||
- backend: "gguf"
|
- backend: "gguf"
|
||||||
test_location: "gguf"
|
test_location: "gguf"
|
||||||
additional_deps: ["peft"]
|
additional_deps: ["peft", "kernels"]
|
||||||
- backend: "torchao"
|
- backend: "torchao"
|
||||||
test_location: "torchao"
|
test_location: "torchao"
|
||||||
additional_deps: []
|
additional_deps: []
|
||||||
- backend: "optimum_quanto"
|
- backend: "optimum_quanto"
|
||||||
test_location: "quanto"
|
test_location: "quanto"
|
||||||
additional_deps: []
|
additional_deps: []
|
||||||
|
- backend: "nvidia_modelopt"
|
||||||
|
test_location: "modelopt"
|
||||||
|
additional_deps: []
|
||||||
runs-on:
|
runs-on:
|
||||||
group: aws-g6e-xlarge-plus
|
group: aws-g6e-xlarge-plus
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --shm-size "20gb" --ipc host --gpus 0
|
options: --shm-size "20gb" --ipc host --gpus all
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -488,13 +352,12 @@ jobs:
|
|||||||
run: nvidia-smi
|
run: nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip install -U ${{ matrix.config.backend }}
|
||||||
python -m uv pip install -U ${{ matrix.config.backend }}
|
|
||||||
if [ "${{ join(matrix.config.additional_deps, ' ') }}" != "" ]; then
|
if [ "${{ join(matrix.config.additional_deps, ' ') }}" != "" ]; then
|
||||||
python -m uv pip install ${{ join(matrix.config.additional_deps, ' ') }}
|
uv pip install ${{ join(matrix.config.additional_deps, ' ') }}
|
||||||
fi
|
fi
|
||||||
python -m uv pip install pytest-reportlog
|
uv pip install pytest-reportlog
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -505,7 +368,7 @@ jobs:
|
|||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
BIG_GPU_MEMORY: 40
|
BIG_GPU_MEMORY: 40
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
--make-reports=tests_${{ matrix.config.backend }}_torch_cuda \
|
--make-reports=tests_${{ matrix.config.backend }}_torch_cuda \
|
||||||
--report-log=tests_${{ matrix.config.backend }}_torch_cuda.log \
|
--report-log=tests_${{ matrix.config.backend }}_torch_cuda.log \
|
||||||
tests/quantization/${{ matrix.config.test_location }}
|
tests/quantization/${{ matrix.config.test_location }}
|
||||||
@@ -520,12 +383,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
name: torch_cuda_${{ matrix.config.backend }}_reports
|
name: torch_cuda_${{ matrix.config.backend }}_reports
|
||||||
path: reports
|
path: reports
|
||||||
- name: Generate Report and Notify Channel
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
pip install slack_sdk tabulate
|
|
||||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
|
||||||
|
|
||||||
run_nightly_pipeline_level_quantization_tests:
|
run_nightly_pipeline_level_quantization_tests:
|
||||||
name: Torch quantization nightly tests
|
name: Torch quantization nightly tests
|
||||||
strategy:
|
strategy:
|
||||||
@@ -535,7 +393,7 @@ jobs:
|
|||||||
group: aws-g6e-xlarge-plus
|
group: aws-g6e-xlarge-plus
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --shm-size "20gb" --ipc host --gpus 0
|
options: --shm-size "20gb" --ipc host --gpus all
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -545,10 +403,9 @@ jobs:
|
|||||||
run: nvidia-smi
|
run: nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip install -U bitsandbytes optimum_quanto
|
||||||
python -m uv pip install -U bitsandbytes optimum_quanto
|
uv pip install pytest-reportlog
|
||||||
python -m uv pip install pytest-reportlog
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -559,7 +416,7 @@ jobs:
|
|||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
BIG_GPU_MEMORY: 40
|
BIG_GPU_MEMORY: 40
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
--make-reports=tests_pipeline_level_quant_torch_cuda \
|
--make-reports=tests_pipeline_level_quant_torch_cuda \
|
||||||
--report-log=tests_pipeline_level_quant_torch_cuda.log \
|
--report-log=tests_pipeline_level_quant_torch_cuda.log \
|
||||||
tests/quantization/test_pipeline_level_quantization.py
|
tests/quantization/test_pipeline_level_quantization.py
|
||||||
@@ -574,12 +431,66 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
name: torch_cuda_pipeline_level_quant_reports
|
name: torch_cuda_pipeline_level_quant_reports
|
||||||
path: reports
|
path: reports
|
||||||
- name: Generate Report and Notify Channel
|
|
||||||
if: always()
|
generate_consolidated_report:
|
||||||
|
name: Generate Consolidated Test Report
|
||||||
|
needs: [
|
||||||
|
run_nightly_tests_for_torch_pipelines,
|
||||||
|
run_nightly_tests_for_other_torch_modules,
|
||||||
|
run_torch_compile_tests,
|
||||||
|
run_big_gpu_torch_tests,
|
||||||
|
run_nightly_quantization_tests,
|
||||||
|
run_nightly_pipeline_level_quantization_tests,
|
||||||
|
# run_nightly_onnx_tests,
|
||||||
|
torch_minimum_version_cuda_tests,
|
||||||
|
# run_flax_tpu_tests
|
||||||
|
]
|
||||||
|
if: always()
|
||||||
|
runs-on:
|
||||||
|
group: aws-general-8-plus
|
||||||
|
container:
|
||||||
|
image: diffusers/diffusers-pytorch-cpu
|
||||||
|
steps:
|
||||||
|
- name: Checkout diffusers
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
fetch-depth: 2
|
||||||
|
|
||||||
|
- name: Create reports directory
|
||||||
|
run: mkdir -p combined_reports
|
||||||
|
|
||||||
|
- name: Download all test reports
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
path: artifacts
|
||||||
|
|
||||||
|
- name: Prepare reports
|
||||||
run: |
|
run: |
|
||||||
|
# Move all report files to a single directory for processing
|
||||||
|
find artifacts -name "*.txt" -exec cp {} combined_reports/ \;
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
pip install -e .[test]
|
||||||
pip install slack_sdk tabulate
|
pip install slack_sdk tabulate
|
||||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
|
||||||
|
- name: Generate consolidated report
|
||||||
|
run: |
|
||||||
|
python utils/consolidated_test_report.py \
|
||||||
|
--reports_dir combined_reports \
|
||||||
|
--output_file $CONSOLIDATED_REPORT_PATH \
|
||||||
|
--slack_channel_name diffusers-ci-nightly
|
||||||
|
|
||||||
|
- name: Show consolidated report
|
||||||
|
run: |
|
||||||
|
cat $CONSOLIDATED_REPORT_PATH >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
|
- name: Upload consolidated report
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: consolidated_test_report
|
||||||
|
path: ${{ env.CONSOLIDATED_REPORT_PATH }}
|
||||||
|
|
||||||
# M1 runner currently not well supported
|
# M1 runner currently not well supported
|
||||||
# TODO: (Dhruv) add these back when we setup better testing for Apple Silicon
|
# TODO: (Dhruv) add these back when we setup better testing for Apple Silicon
|
||||||
# run_nightly_tests_apple_m1:
|
# run_nightly_tests_apple_m1:
|
||||||
@@ -605,11 +516,11 @@ jobs:
|
|||||||
# - name: Install dependencies
|
# - name: Install dependencies
|
||||||
# shell: arch -arch arm64 bash {0}
|
# shell: arch -arch arm64 bash {0}
|
||||||
# run: |
|
# run: |
|
||||||
# ${CONDA_RUN} python -m pip install --upgrade pip uv
|
# ${CONDA_RUN} pip install --upgrade pip uv
|
||||||
# ${CONDA_RUN} python -m uv pip install -e [quality,test]
|
# ${CONDA_RUN} uv pip install -e ".[quality]"
|
||||||
# ${CONDA_RUN} python -m uv pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
|
# ${CONDA_RUN} uv pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
|
||||||
# ${CONDA_RUN} python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate
|
# ${CONDA_RUN} uv pip install accelerate@git+https://github.com/huggingface/accelerate
|
||||||
# ${CONDA_RUN} python -m uv pip install pytest-reportlog
|
# ${CONDA_RUN} uv pip install pytest-reportlog
|
||||||
# - name: Environment
|
# - name: Environment
|
||||||
# shell: arch -arch arm64 bash {0}
|
# shell: arch -arch arm64 bash {0}
|
||||||
# run: |
|
# run: |
|
||||||
@@ -620,7 +531,7 @@ jobs:
|
|||||||
# HF_HOME: /System/Volumes/Data/mnt/cache
|
# HF_HOME: /System/Volumes/Data/mnt/cache
|
||||||
# HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
# HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
||||||
# run: |
|
# run: |
|
||||||
# ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps \
|
# ${CONDA_RUN} pytest -n 1 -s -v --make-reports=tests_torch_mps \
|
||||||
# --report-log=tests_torch_mps.log \
|
# --report-log=tests_torch_mps.log \
|
||||||
# tests/
|
# tests/
|
||||||
# - name: Failure short reports
|
# - name: Failure short reports
|
||||||
@@ -661,11 +572,11 @@ jobs:
|
|||||||
# - name: Install dependencies
|
# - name: Install dependencies
|
||||||
# shell: arch -arch arm64 bash {0}
|
# shell: arch -arch arm64 bash {0}
|
||||||
# run: |
|
# run: |
|
||||||
# ${CONDA_RUN} python -m pip install --upgrade pip uv
|
# ${CONDA_RUN} pip install --upgrade pip uv
|
||||||
# ${CONDA_RUN} python -m uv pip install -e [quality,test]
|
# ${CONDA_RUN} uv pip install -e ".[quality]"
|
||||||
# ${CONDA_RUN} python -m uv pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
|
# ${CONDA_RUN} uv pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
|
||||||
# ${CONDA_RUN} python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate
|
# ${CONDA_RUN} uv pip install accelerate@git+https://github.com/huggingface/accelerate
|
||||||
# ${CONDA_RUN} python -m uv pip install pytest-reportlog
|
# ${CONDA_RUN} uv pip install pytest-reportlog
|
||||||
# - name: Environment
|
# - name: Environment
|
||||||
# shell: arch -arch arm64 bash {0}
|
# shell: arch -arch arm64 bash {0}
|
||||||
# run: |
|
# run: |
|
||||||
@@ -676,7 +587,7 @@ jobs:
|
|||||||
# HF_HOME: /System/Volumes/Data/mnt/cache
|
# HF_HOME: /System/Volumes/Data/mnt/cache
|
||||||
# HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
# HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
||||||
# run: |
|
# run: |
|
||||||
# ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps \
|
# ${CONDA_RUN} pytest -n 1 -s -v --make-reports=tests_torch_mps \
|
||||||
# --report-log=tests_torch_mps.log \
|
# --report-log=tests_torch_mps.log \
|
||||||
# tests/
|
# tests/
|
||||||
# - name: Failure short reports
|
# - name: Failure short reports
|
||||||
|
|||||||
9
.github/workflows/pr_dependency_test.yml
vendored
9
.github/workflows/pr_dependency_test.yml
vendored
@@ -25,11 +25,8 @@ jobs:
|
|||||||
python-version: "3.8"
|
python-version: "3.8"
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
pip install -e .
|
||||||
python -m pip install --upgrade pip uv
|
pip install pytest
|
||||||
python -m uv pip install -e .
|
|
||||||
python -m uv pip install pytest
|
|
||||||
- name: Check for soft dependencies
|
- name: Check for soft dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
pytest tests/others/test_dependencies.py
|
||||||
pytest tests/others/test_dependencies.py
|
|
||||||
|
|||||||
38
.github/workflows/pr_flax_dependency_test.yml
vendored
38
.github/workflows/pr_flax_dependency_test.yml
vendored
@@ -1,38 +0,0 @@
|
|||||||
name: Run Flax dependency tests
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
paths:
|
|
||||||
- "src/diffusers/**.py"
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
check_flax_dependencies:
|
|
||||||
runs-on: ubuntu-22.04
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
- name: Set up Python
|
|
||||||
uses: actions/setup-python@v4
|
|
||||||
with:
|
|
||||||
python-version: "3.8"
|
|
||||||
- name: Install dependencies
|
|
||||||
run: |
|
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python -m pip install --upgrade pip uv
|
|
||||||
python -m uv pip install -e .
|
|
||||||
python -m uv pip install "jax[cpu]>=0.2.16,!=0.3.2"
|
|
||||||
python -m uv pip install "flax>=0.4.1"
|
|
||||||
python -m uv pip install "jaxlib>=0.1.65"
|
|
||||||
python -m uv pip install pytest
|
|
||||||
- name: Check for soft dependencies
|
|
||||||
run: |
|
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
pytest tests/others/test_dependencies.py
|
|
||||||
138
.github/workflows/pr_modular_tests.yml
vendored
Normal file
138
.github/workflows/pr_modular_tests.yml
vendored
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
name: Fast PR tests for Modular
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
branches: [main]
|
||||||
|
paths:
|
||||||
|
- "src/diffusers/modular_pipelines/**.py"
|
||||||
|
- "src/diffusers/models/modeling_utils.py"
|
||||||
|
- "src/diffusers/models/model_loading_utils.py"
|
||||||
|
- "src/diffusers/pipelines/pipeline_utils.py"
|
||||||
|
- "src/diffusers/pipeline_loading_utils.py"
|
||||||
|
- "src/diffusers/loaders/lora_base.py"
|
||||||
|
- "src/diffusers/loaders/lora_pipeline.py"
|
||||||
|
- "src/diffusers/loaders/peft.py"
|
||||||
|
- "tests/modular_pipelines/**.py"
|
||||||
|
- ".github/**.yml"
|
||||||
|
- "utils/**.py"
|
||||||
|
- "setup.py"
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- ci-*
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
DIFFUSERS_IS_CI: yes
|
||||||
|
HF_XET_HIGH_PERFORMANCE: 1
|
||||||
|
OMP_NUM_THREADS: 4
|
||||||
|
MKL_NUM_THREADS: 4
|
||||||
|
PYTEST_TIMEOUT: 60
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
check_code_quality:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v4
|
||||||
|
with:
|
||||||
|
python-version: "3.10"
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
pip install --upgrade pip
|
||||||
|
pip install .[quality]
|
||||||
|
- name: Check quality
|
||||||
|
run: make quality
|
||||||
|
- name: Check if failure
|
||||||
|
if: ${{ failure() }}
|
||||||
|
run: |
|
||||||
|
echo "Quality check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make style && make quality'" >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
|
check_repository_consistency:
|
||||||
|
needs: check_code_quality
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v4
|
||||||
|
with:
|
||||||
|
python-version: "3.10"
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
pip install --upgrade pip
|
||||||
|
pip install .[quality]
|
||||||
|
- name: Check repo consistency
|
||||||
|
run: |
|
||||||
|
python utils/check_copies.py
|
||||||
|
python utils/check_dummies.py
|
||||||
|
python utils/check_support_list.py
|
||||||
|
make deps_table_check_updated
|
||||||
|
- name: Check if failure
|
||||||
|
if: ${{ failure() }}
|
||||||
|
run: |
|
||||||
|
echo "Repo consistency check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make fix-copies'" >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
|
run_fast_tests:
|
||||||
|
needs: [check_code_quality, check_repository_consistency]
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
config:
|
||||||
|
- name: Fast PyTorch Modular Pipeline CPU tests
|
||||||
|
framework: pytorch_pipelines
|
||||||
|
runner: aws-highmemory-32-plus
|
||||||
|
image: diffusers/diffusers-pytorch-cpu
|
||||||
|
report: torch_cpu_modular_pipelines
|
||||||
|
|
||||||
|
name: ${{ matrix.config.name }}
|
||||||
|
|
||||||
|
runs-on:
|
||||||
|
group: ${{ matrix.config.runner }}
|
||||||
|
|
||||||
|
container:
|
||||||
|
image: ${{ matrix.config.image }}
|
||||||
|
options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
|
||||||
|
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
shell: bash
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout diffusers
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
fetch-depth: 2
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
uv pip install -e ".[quality]"
|
||||||
|
uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
|
||||||
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
|
||||||
|
|
||||||
|
- name: Environment
|
||||||
|
run: |
|
||||||
|
python utils/print_env.py
|
||||||
|
|
||||||
|
- name: Run fast PyTorch Pipeline CPU tests
|
||||||
|
if: ${{ matrix.config.framework == 'pytorch_pipelines' }}
|
||||||
|
run: |
|
||||||
|
pytest -n 8 --max-worker-restart=0 --dist=loadfile \
|
||||||
|
-s -v -k "not Flax and not Onnx" \
|
||||||
|
--make-reports=tests_${{ matrix.config.report }} \
|
||||||
|
tests/modular_pipelines
|
||||||
|
|
||||||
|
- name: Failure short reports
|
||||||
|
if: ${{ failure() }}
|
||||||
|
run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt
|
||||||
|
|
||||||
|
- name: Test suite reports artifacts
|
||||||
|
if: ${{ always() }}
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: pr_${{ matrix.config.framework }}_${{ matrix.config.report }}_test_reports
|
||||||
|
path: reports
|
||||||
|
|
||||||
|
|
||||||
2
.github/workflows/pr_style_bot.yml
vendored
2
.github/workflows/pr_style_bot.yml
vendored
@@ -14,4 +14,4 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
python_quality_dependencies: "[quality]"
|
python_quality_dependencies: "[quality]"
|
||||||
secrets:
|
secrets:
|
||||||
bot_token: ${{ secrets.GITHUB_TOKEN }}
|
bot_token: ${{ secrets.HF_STYLE_BOT_ACTION }}
|
||||||
19
.github/workflows/pr_test_fetcher.yml
vendored
19
.github/workflows/pr_test_fetcher.yml
vendored
@@ -33,8 +33,7 @@ jobs:
|
|||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -90,19 +89,16 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m pip install -e [quality,test]
|
uv pip install accelerate
|
||||||
python -m pip install accelerate
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
|
|
||||||
- name: Run all selected tests on CPU
|
- name: Run all selected tests on CPU
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
pytest -n 2 --dist=loadfile -v --make-reports=${{ matrix.modules }}_tests_cpu ${{ fromJson(needs.setup_pr_tests.outputs.test_map)[matrix.modules] }}
|
||||||
python -m pytest -n 2 --dist=loadfile -v --make-reports=${{ matrix.modules }}_tests_cpu ${{ fromJson(needs.setup_pr_tests.outputs.test_map)[matrix.modules] }}
|
|
||||||
|
|
||||||
- name: Failure short reports
|
- name: Failure short reports
|
||||||
if: ${{ failure() }}
|
if: ${{ failure() }}
|
||||||
@@ -148,19 +144,16 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
pip install -e [quality]
|
||||||
python -m pip install -e [quality,test]
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
|
|
||||||
- name: Run Hub tests for models, schedulers, and pipelines on a staging env
|
- name: Run Hub tests for models, schedulers, and pipelines on a staging env
|
||||||
if: ${{ matrix.config.framework == 'hub_tests_pytorch' }}
|
if: ${{ matrix.config.framework == 'hub_tests_pytorch' }}
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
HUGGINGFACE_CO_STAGING=true pytest \
|
||||||
HUGGINGFACE_CO_STAGING=true python -m pytest \
|
|
||||||
-m "is_staging_test" \
|
-m "is_staging_test" \
|
||||||
--make-reports=tests_${{ matrix.config.report }} \
|
--make-reports=tests_${{ matrix.config.report }} \
|
||||||
tests
|
tests
|
||||||
|
|||||||
67
.github/workflows/pr_tests.yml
vendored
67
.github/workflows/pr_tests.yml
vendored
@@ -22,7 +22,7 @@ concurrency:
|
|||||||
|
|
||||||
env:
|
env:
|
||||||
DIFFUSERS_IS_CI: yes
|
DIFFUSERS_IS_CI: yes
|
||||||
HF_HUB_ENABLE_HF_TRANSFER: 1
|
HF_XET_HIGH_PERFORMANCE: 1
|
||||||
OMP_NUM_THREADS: 4
|
OMP_NUM_THREADS: 4
|
||||||
MKL_NUM_THREADS: 4
|
MKL_NUM_THREADS: 4
|
||||||
PYTEST_TIMEOUT: 60
|
PYTEST_TIMEOUT: 60
|
||||||
@@ -38,7 +38,7 @@ jobs:
|
|||||||
python-version: "3.8"
|
python-version: "3.8"
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m pip install --upgrade pip
|
pip install --upgrade pip
|
||||||
pip install .[quality]
|
pip install .[quality]
|
||||||
- name: Check quality
|
- name: Check quality
|
||||||
run: make quality
|
run: make quality
|
||||||
@@ -58,7 +58,7 @@ jobs:
|
|||||||
python-version: "3.8"
|
python-version: "3.8"
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m pip install --upgrade pip
|
pip install --upgrade pip
|
||||||
pip install .[quality]
|
pip install .[quality]
|
||||||
- name: Check repo consistency
|
- name: Check repo consistency
|
||||||
run: |
|
run: |
|
||||||
@@ -87,11 +87,6 @@ jobs:
|
|||||||
runner: aws-general-8-plus
|
runner: aws-general-8-plus
|
||||||
image: diffusers/diffusers-pytorch-cpu
|
image: diffusers/diffusers-pytorch-cpu
|
||||||
report: torch_cpu_models_schedulers
|
report: torch_cpu_models_schedulers
|
||||||
- name: Fast Flax CPU tests
|
|
||||||
framework: flax
|
|
||||||
runner: aws-general-8-plus
|
|
||||||
image: diffusers/diffusers-flax-cpu
|
|
||||||
report: flax_cpu
|
|
||||||
- name: PyTorch Example CPU tests
|
- name: PyTorch Example CPU tests
|
||||||
framework: pytorch_examples
|
framework: pytorch_examples
|
||||||
runner: aws-general-8-plus
|
runner: aws-general-8-plus
|
||||||
@@ -119,21 +114,18 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
|
||||||
pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
|
|
||||||
- name: Run fast PyTorch Pipeline CPU tests
|
- name: Run fast PyTorch Pipeline CPU tests
|
||||||
if: ${{ matrix.config.framework == 'pytorch_pipelines' }}
|
if: ${{ matrix.config.framework == 'pytorch_pipelines' }}
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
pytest -n 8 --max-worker-restart=0 --dist=loadfile \
|
||||||
python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile \
|
|
||||||
-s -v -k "not Flax and not Onnx" \
|
-s -v -k "not Flax and not Onnx" \
|
||||||
--make-reports=tests_${{ matrix.config.report }} \
|
--make-reports=tests_${{ matrix.config.report }} \
|
||||||
tests/pipelines
|
tests/pipelines
|
||||||
@@ -141,27 +133,16 @@ jobs:
|
|||||||
- name: Run fast PyTorch Model Scheduler CPU tests
|
- name: Run fast PyTorch Model Scheduler CPU tests
|
||||||
if: ${{ matrix.config.framework == 'pytorch_models' }}
|
if: ${{ matrix.config.framework == 'pytorch_models' }}
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
||||||
python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
|
||||||
-s -v -k "not Flax and not Onnx and not Dependency" \
|
-s -v -k "not Flax and not Onnx and not Dependency" \
|
||||||
--make-reports=tests_${{ matrix.config.report }} \
|
--make-reports=tests_${{ matrix.config.report }} \
|
||||||
tests/models tests/schedulers tests/others
|
tests/models tests/schedulers tests/others
|
||||||
|
|
||||||
- name: Run fast Flax TPU tests
|
|
||||||
if: ${{ matrix.config.framework == 'flax' }}
|
|
||||||
run: |
|
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
|
||||||
-s -v -k "Flax" \
|
|
||||||
--make-reports=tests_${{ matrix.config.report }} \
|
|
||||||
tests
|
|
||||||
|
|
||||||
- name: Run example PyTorch CPU tests
|
- name: Run example PyTorch CPU tests
|
||||||
if: ${{ matrix.config.framework == 'pytorch_examples' }}
|
if: ${{ matrix.config.framework == 'pytorch_examples' }}
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install ".[training]"
|
||||||
python -m uv pip install peft timm
|
pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
||||||
python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
|
||||||
--make-reports=tests_${{ matrix.config.report }} \
|
--make-reports=tests_${{ matrix.config.report }} \
|
||||||
examples
|
examples
|
||||||
|
|
||||||
@@ -209,19 +190,16 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
|
|
||||||
- name: Run Hub tests for models, schedulers, and pipelines on a staging env
|
- name: Run Hub tests for models, schedulers, and pipelines on a staging env
|
||||||
if: ${{ matrix.config.framework == 'hub_tests_pytorch' }}
|
if: ${{ matrix.config.framework == 'hub_tests_pytorch' }}
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
HUGGINGFACE_CO_STAGING=true pytest \
|
||||||
HUGGINGFACE_CO_STAGING=true python -m pytest \
|
|
||||||
-m "is_staging_test" \
|
-m "is_staging_test" \
|
||||||
--make-reports=tests_${{ matrix.config.report }} \
|
--make-reports=tests_${{ matrix.config.report }} \
|
||||||
tests
|
tests
|
||||||
@@ -263,27 +241,24 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
# TODO (sayakpaul, DN6): revisit `--no-deps`
|
# TODO (sayakpaul, DN6): revisit `--no-deps`
|
||||||
python -m pip install -U peft@git+https://github.com/huggingface/peft.git --no-deps
|
uv pip install -U peft@git+https://github.com/huggingface/peft.git --no-deps
|
||||||
python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
|
uv pip install -U tokenizers
|
||||||
python -m uv pip install -U tokenizers
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
|
uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
|
|
||||||
- name: Run fast PyTorch LoRA tests with PEFT
|
- name: Run fast PyTorch LoRA tests with PEFT
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
||||||
python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
|
||||||
-s -v \
|
-s -v \
|
||||||
--make-reports=tests_peft_main \
|
--make-reports=tests_peft_main \
|
||||||
tests/lora/
|
tests/lora/
|
||||||
python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v \
|
-s -v \
|
||||||
--make-reports=tests_models_lora_peft_main \
|
--make-reports=tests_models_lora_peft_main \
|
||||||
tests/models/ -k "lora"
|
tests/models/ -k "lora"
|
||||||
@@ -291,8 +266,8 @@ jobs:
|
|||||||
- name: Failure short reports
|
- name: Failure short reports
|
||||||
if: ${{ failure() }}
|
if: ${{ failure() }}
|
||||||
run: |
|
run: |
|
||||||
cat reports/tests_lora_failures_short.txt
|
cat reports/tests_peft_main_failures_short.txt
|
||||||
cat reports/tests_models_lora_failures_short.txt
|
cat reports/tests_models_lora_peft_main_failures_short.txt
|
||||||
|
|
||||||
- name: Test suite reports artifacts
|
- name: Test suite reports artifacts
|
||||||
if: ${{ always() }}
|
if: ${{ always() }}
|
||||||
|
|||||||
53
.github/workflows/pr_tests_gpu.yml
vendored
53
.github/workflows/pr_tests_gpu.yml
vendored
@@ -13,6 +13,7 @@ on:
|
|||||||
- "src/diffusers/loaders/peft.py"
|
- "src/diffusers/loaders/peft.py"
|
||||||
- "tests/pipelines/test_pipelines_common.py"
|
- "tests/pipelines/test_pipelines_common.py"
|
||||||
- "tests/models/test_modeling_common.py"
|
- "tests/models/test_modeling_common.py"
|
||||||
|
- "examples/**/*.py"
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
@@ -23,7 +24,7 @@ env:
|
|||||||
DIFFUSERS_IS_CI: yes
|
DIFFUSERS_IS_CI: yes
|
||||||
OMP_NUM_THREADS: 8
|
OMP_NUM_THREADS: 8
|
||||||
MKL_NUM_THREADS: 8
|
MKL_NUM_THREADS: 8
|
||||||
HF_HUB_ENABLE_HF_TRANSFER: 1
|
HF_XET_HIGH_PERFORMANCE: 1
|
||||||
PYTEST_TIMEOUT: 600
|
PYTEST_TIMEOUT: 600
|
||||||
PIPELINE_USAGE_CUTOFF: 1000000000 # set high cutoff so that only always-test pipelines run
|
PIPELINE_USAGE_CUTOFF: 1000000000 # set high cutoff so that only always-test pipelines run
|
||||||
|
|
||||||
@@ -38,7 +39,7 @@ jobs:
|
|||||||
python-version: "3.8"
|
python-version: "3.8"
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m pip install --upgrade pip
|
pip install --upgrade pip
|
||||||
pip install .[quality]
|
pip install .[quality]
|
||||||
- name: Check quality
|
- name: Check quality
|
||||||
run: make quality
|
run: make quality
|
||||||
@@ -58,7 +59,7 @@ jobs:
|
|||||||
python-version: "3.8"
|
python-version: "3.8"
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m pip install --upgrade pip
|
pip install --upgrade pip
|
||||||
pip install .[quality]
|
pip install .[quality]
|
||||||
- name: Check repo consistency
|
- name: Check repo consistency
|
||||||
run: |
|
run: |
|
||||||
@@ -87,8 +88,7 @@ jobs:
|
|||||||
fetch-depth: 2
|
fetch-depth: 2
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -117,7 +117,7 @@ jobs:
|
|||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --shm-size "16gb" --ipc host --gpus 0
|
options: --shm-size "16gb" --ipc host --gpus all
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -129,10 +129,9 @@ jobs:
|
|||||||
nvidia-smi
|
nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
|
||||||
pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
@@ -151,13 +150,13 @@ jobs:
|
|||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
run: |
|
run: |
|
||||||
if [ "${{ matrix.module }}" = "ip_adapters" ]; then
|
if [ "${{ matrix.module }}" = "ip_adapters" ]; then
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v -k "not Flax and not Onnx" \
|
-s -v -k "not Flax and not Onnx" \
|
||||||
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
|
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
|
||||||
tests/pipelines/${{ matrix.module }}
|
tests/pipelines/${{ matrix.module }}
|
||||||
else
|
else
|
||||||
pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
|
pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v -k "not Flax and not Onnx and $pattern" \
|
-s -v -k "not Flax and not Onnx and $pattern" \
|
||||||
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
|
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
|
||||||
tests/pipelines/${{ matrix.module }}
|
tests/pipelines/${{ matrix.module }}
|
||||||
@@ -182,13 +181,13 @@ jobs:
|
|||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --shm-size "16gb" --ipc host --gpus 0
|
options: --shm-size "16gb" --ipc host --gpus all
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: bash
|
shell: bash
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
max-parallel: 2
|
max-parallel: 4
|
||||||
matrix:
|
matrix:
|
||||||
module: [models, schedulers, lora, others]
|
module: [models, schedulers, lora, others]
|
||||||
steps:
|
steps:
|
||||||
@@ -199,11 +198,10 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip install peft@git+https://github.com/huggingface/peft.git
|
||||||
python -m uv pip install peft@git+https://github.com/huggingface/peft.git
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
|
||||||
pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
@@ -224,10 +222,10 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
|
pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
|
||||||
if [ -z "$pattern" ]; then
|
if [ -z "$pattern" ]; then
|
||||||
python -m pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx" tests/${{ matrix.module }} \
|
pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx" tests/${{ matrix.module }} \
|
||||||
--make-reports=tests_torch_cuda_${{ matrix.module }}
|
--make-reports=tests_torch_cuda_${{ matrix.module }}
|
||||||
else
|
else
|
||||||
python -m pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx and $pattern" tests/${{ matrix.module }} \
|
pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx and $pattern" tests/${{ matrix.module }} \
|
||||||
--make-reports=tests_torch_cuda_${{ matrix.module }}
|
--make-reports=tests_torch_cuda_${{ matrix.module }}
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -252,7 +250,7 @@ jobs:
|
|||||||
|
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --gpus 0 --shm-size "16gb" --ipc host
|
options: --gpus all --shm-size "16gb" --ipc host
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -264,22 +262,19 @@ jobs:
|
|||||||
nvidia-smi
|
nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
|
||||||
pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
|
uv pip install -e ".[quality,training]"
|
||||||
python -m uv pip install -e [quality,test,training]
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
|
|
||||||
- name: Run example tests on GPU
|
- name: Run example tests on GPU
|
||||||
env:
|
env:
|
||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install ".[training]"
|
||||||
python -m uv pip install timm
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
|
|
||||||
|
|
||||||
- name: Failure short reports
|
- name: Failure short reports
|
||||||
if: ${{ failure() }}
|
if: ${{ failure() }}
|
||||||
|
|||||||
10
.github/workflows/pr_torch_dependency_test.yml
vendored
10
.github/workflows/pr_torch_dependency_test.yml
vendored
@@ -25,12 +25,8 @@ jobs:
|
|||||||
python-version: "3.8"
|
python-version: "3.8"
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
pip install -e .
|
||||||
python -m pip install --upgrade pip uv
|
pip install torch torchvision torchaudio pytest
|
||||||
python -m uv pip install -e .
|
|
||||||
python -m uv pip install torch torchvision torchaudio
|
|
||||||
python -m uv pip install pytest
|
|
||||||
- name: Check for soft dependencies
|
- name: Check for soft dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
pytest tests/others/test_dependencies.py
|
||||||
pytest tests/others/test_dependencies.py
|
|
||||||
|
|||||||
148
.github/workflows/push_tests.yml
vendored
148
.github/workflows/push_tests.yml
vendored
@@ -14,7 +14,7 @@ env:
|
|||||||
DIFFUSERS_IS_CI: yes
|
DIFFUSERS_IS_CI: yes
|
||||||
OMP_NUM_THREADS: 8
|
OMP_NUM_THREADS: 8
|
||||||
MKL_NUM_THREADS: 8
|
MKL_NUM_THREADS: 8
|
||||||
HF_HUB_ENABLE_HF_TRANSFER: 1
|
HF_XET_HIGH_PERFORMANCE: 1
|
||||||
PYTEST_TIMEOUT: 600
|
PYTEST_TIMEOUT: 600
|
||||||
PIPELINE_USAGE_CUTOFF: 50000
|
PIPELINE_USAGE_CUTOFF: 50000
|
||||||
|
|
||||||
@@ -34,8 +34,7 @@ jobs:
|
|||||||
fetch-depth: 2
|
fetch-depth: 2
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -64,7 +63,7 @@ jobs:
|
|||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --shm-size "16gb" --ipc host --gpus 0
|
options: --shm-size "16gb" --ipc host --gpus all
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -75,9 +74,8 @@ jobs:
|
|||||||
nvidia-smi
|
nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -87,7 +85,7 @@ jobs:
|
|||||||
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v -k "not Flax and not Onnx" \
|
-s -v -k "not Flax and not Onnx" \
|
||||||
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
|
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
|
||||||
tests/pipelines/${{ matrix.module }}
|
tests/pipelines/${{ matrix.module }}
|
||||||
@@ -109,7 +107,7 @@ jobs:
|
|||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --shm-size "16gb" --ipc host --gpus 0
|
options: --shm-size "16gb" --ipc host --gpus all
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -126,10 +124,9 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip install peft@git+https://github.com/huggingface/peft.git
|
||||||
python -m uv pip install peft@git+https://github.com/huggingface/peft.git
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
@@ -141,7 +138,7 @@ jobs:
|
|||||||
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v -k "not Flax and not Onnx" \
|
-s -v -k "not Flax and not Onnx" \
|
||||||
--make-reports=tests_torch_cuda_${{ matrix.module }} \
|
--make-reports=tests_torch_cuda_${{ matrix.module }} \
|
||||||
tests/${{ matrix.module }}
|
tests/${{ matrix.module }}
|
||||||
@@ -159,102 +156,6 @@ jobs:
|
|||||||
name: torch_cuda_test_reports_${{ matrix.module }}
|
name: torch_cuda_test_reports_${{ matrix.module }}
|
||||||
path: reports
|
path: reports
|
||||||
|
|
||||||
flax_tpu_tests:
|
|
||||||
name: Flax TPU Tests
|
|
||||||
runs-on:
|
|
||||||
group: gcp-ct5lp-hightpu-8t
|
|
||||||
container:
|
|
||||||
image: diffusers/diffusers-flax-tpu
|
|
||||||
options: --shm-size "16gb" --ipc host --privileged ${{ vars.V5_LITEPOD_8_ENV}} -v /mnt/hf_cache:/mnt/hf_cache
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
steps:
|
|
||||||
- name: Checkout diffusers
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 2
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: |
|
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
|
|
||||||
- name: Environment
|
|
||||||
run: |
|
|
||||||
python utils/print_env.py
|
|
||||||
|
|
||||||
- name: Run Flax TPU tests
|
|
||||||
env:
|
|
||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
|
||||||
run: |
|
|
||||||
python -m pytest -n 0 \
|
|
||||||
-s -v -k "Flax" \
|
|
||||||
--make-reports=tests_flax_tpu \
|
|
||||||
tests/
|
|
||||||
|
|
||||||
- name: Failure short reports
|
|
||||||
if: ${{ failure() }}
|
|
||||||
run: |
|
|
||||||
cat reports/tests_flax_tpu_stats.txt
|
|
||||||
cat reports/tests_flax_tpu_failures_short.txt
|
|
||||||
|
|
||||||
- name: Test suite reports artifacts
|
|
||||||
if: ${{ always() }}
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: flax_tpu_test_reports
|
|
||||||
path: reports
|
|
||||||
|
|
||||||
onnx_cuda_tests:
|
|
||||||
name: ONNX CUDA Tests
|
|
||||||
runs-on:
|
|
||||||
group: aws-g4dn-2xlarge
|
|
||||||
container:
|
|
||||||
image: diffusers/diffusers-onnxruntime-cuda
|
|
||||||
options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
steps:
|
|
||||||
- name: Checkout diffusers
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 2
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: |
|
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
|
|
||||||
- name: Environment
|
|
||||||
run: |
|
|
||||||
python utils/print_env.py
|
|
||||||
|
|
||||||
- name: Run ONNXRuntime CUDA tests
|
|
||||||
env:
|
|
||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
|
||||||
run: |
|
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
|
||||||
-s -v -k "Onnx" \
|
|
||||||
--make-reports=tests_onnx_cuda \
|
|
||||||
tests/
|
|
||||||
|
|
||||||
- name: Failure short reports
|
|
||||||
if: ${{ failure() }}
|
|
||||||
run: |
|
|
||||||
cat reports/tests_onnx_cuda_stats.txt
|
|
||||||
cat reports/tests_onnx_cuda_failures_short.txt
|
|
||||||
|
|
||||||
- name: Test suite reports artifacts
|
|
||||||
if: ${{ always() }}
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: onnx_cuda_test_reports
|
|
||||||
path: reports
|
|
||||||
|
|
||||||
run_torch_compile_tests:
|
run_torch_compile_tests:
|
||||||
name: PyTorch Compile CUDA tests
|
name: PyTorch Compile CUDA tests
|
||||||
|
|
||||||
@@ -262,8 +163,8 @@ jobs:
|
|||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
|
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-compile-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --gpus 0 --shm-size "16gb" --ipc host
|
options: --gpus all --shm-size "16gb" --ipc host
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
@@ -276,8 +177,7 @@ jobs:
|
|||||||
nvidia-smi
|
nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality,training]"
|
||||||
python -m uv pip install -e [quality,test,training]
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -286,7 +186,7 @@ jobs:
|
|||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
||||||
RUN_COMPILE: yes
|
RUN_COMPILE: yes
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
|
||||||
- name: Failure short reports
|
- name: Failure short reports
|
||||||
if: ${{ failure() }}
|
if: ${{ failure() }}
|
||||||
run: cat reports/tests_torch_compile_cuda_failures_short.txt
|
run: cat reports/tests_torch_compile_cuda_failures_short.txt
|
||||||
@@ -306,7 +206,7 @@ jobs:
|
|||||||
|
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-xformers-cuda
|
image: diffusers/diffusers-pytorch-xformers-cuda
|
||||||
options: --gpus 0 --shm-size "16gb" --ipc host
|
options: --gpus all --shm-size "16gb" --ipc host
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
@@ -319,8 +219,7 @@ jobs:
|
|||||||
nvidia-smi
|
nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality,training]"
|
||||||
python -m uv pip install -e [quality,test,training]
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -328,7 +227,7 @@ jobs:
|
|||||||
env:
|
env:
|
||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
|
||||||
- name: Failure short reports
|
- name: Failure short reports
|
||||||
if: ${{ failure() }}
|
if: ${{ failure() }}
|
||||||
run: cat reports/tests_torch_xformers_cuda_failures_short.txt
|
run: cat reports/tests_torch_xformers_cuda_failures_short.txt
|
||||||
@@ -348,7 +247,7 @@ jobs:
|
|||||||
|
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --gpus 0 --shm-size "16gb" --ipc host
|
options: --gpus all --shm-size "16gb" --ipc host
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -360,21 +259,18 @@ jobs:
|
|||||||
nvidia-smi
|
nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality,training]"
|
||||||
python -m uv pip install -e [quality,test,training]
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
|
|
||||||
- name: Run example tests on GPU
|
- name: Run example tests on GPU
|
||||||
env:
|
env:
|
||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install ".[training]"
|
||||||
python -m uv pip install timm
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
|
|
||||||
|
|
||||||
- name: Failure short reports
|
- name: Failure short reports
|
||||||
if: ${{ failure() }}
|
if: ${{ failure() }}
|
||||||
|
|||||||
42
.github/workflows/push_tests_fast.yml
vendored
42
.github/workflows/push_tests_fast.yml
vendored
@@ -18,7 +18,7 @@ env:
|
|||||||
HF_HOME: /mnt/cache
|
HF_HOME: /mnt/cache
|
||||||
OMP_NUM_THREADS: 8
|
OMP_NUM_THREADS: 8
|
||||||
MKL_NUM_THREADS: 8
|
MKL_NUM_THREADS: 8
|
||||||
HF_HUB_ENABLE_HF_TRANSFER: 1
|
HF_XET_HIGH_PERFORMANCE: 1
|
||||||
PYTEST_TIMEOUT: 600
|
PYTEST_TIMEOUT: 600
|
||||||
RUN_SLOW: no
|
RUN_SLOW: no
|
||||||
|
|
||||||
@@ -33,16 +33,6 @@ jobs:
|
|||||||
runner: aws-general-8-plus
|
runner: aws-general-8-plus
|
||||||
image: diffusers/diffusers-pytorch-cpu
|
image: diffusers/diffusers-pytorch-cpu
|
||||||
report: torch_cpu
|
report: torch_cpu
|
||||||
- name: Fast Flax CPU tests on Ubuntu
|
|
||||||
framework: flax
|
|
||||||
runner: aws-general-8-plus
|
|
||||||
image: diffusers/diffusers-flax-cpu
|
|
||||||
report: flax_cpu
|
|
||||||
- name: Fast ONNXRuntime CPU tests on Ubuntu
|
|
||||||
framework: onnxruntime
|
|
||||||
runner: aws-general-8-plus
|
|
||||||
image: diffusers/diffusers-onnxruntime-cpu
|
|
||||||
report: onnx_cpu
|
|
||||||
- name: PyTorch Example CPU tests on Ubuntu
|
- name: PyTorch Example CPU tests on Ubuntu
|
||||||
framework: pytorch_examples
|
framework: pytorch_examples
|
||||||
runner: aws-general-8-plus
|
runner: aws-general-8-plus
|
||||||
@@ -70,47 +60,25 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
|
|
||||||
- name: Run fast PyTorch CPU tests
|
- name: Run fast PyTorch CPU tests
|
||||||
if: ${{ matrix.config.framework == 'pytorch' }}
|
if: ${{ matrix.config.framework == 'pytorch' }}
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
||||||
python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
|
||||||
-s -v -k "not Flax and not Onnx" \
|
-s -v -k "not Flax and not Onnx" \
|
||||||
--make-reports=tests_${{ matrix.config.report }} \
|
--make-reports=tests_${{ matrix.config.report }} \
|
||||||
tests/
|
tests/
|
||||||
|
|
||||||
- name: Run fast Flax TPU tests
|
|
||||||
if: ${{ matrix.config.framework == 'flax' }}
|
|
||||||
run: |
|
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
|
||||||
-s -v -k "Flax" \
|
|
||||||
--make-reports=tests_${{ matrix.config.report }} \
|
|
||||||
tests/
|
|
||||||
|
|
||||||
- name: Run fast ONNXRuntime CPU tests
|
|
||||||
if: ${{ matrix.config.framework == 'onnxruntime' }}
|
|
||||||
run: |
|
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
|
||||||
-s -v -k "Onnx" \
|
|
||||||
--make-reports=tests_${{ matrix.config.report }} \
|
|
||||||
tests/
|
|
||||||
|
|
||||||
- name: Run example PyTorch CPU tests
|
- name: Run example PyTorch CPU tests
|
||||||
if: ${{ matrix.config.framework == 'pytorch_examples' }}
|
if: ${{ matrix.config.framework == 'pytorch_examples' }}
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install ".[training]"
|
||||||
python -m uv pip install peft timm
|
pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
||||||
python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
|
|
||||||
--make-reports=tests_${{ matrix.config.report }} \
|
--make-reports=tests_${{ matrix.config.report }} \
|
||||||
examples
|
examples
|
||||||
|
|
||||||
|
|||||||
9
.github/workflows/push_tests_mps.yml
vendored
9
.github/workflows/push_tests_mps.yml
vendored
@@ -1,19 +1,14 @@
|
|||||||
name: Fast mps tests on main
|
name: Fast mps tests on main
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
workflow_dispatch:
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
paths:
|
|
||||||
- "src/diffusers/**.py"
|
|
||||||
- "tests/**.py"
|
|
||||||
|
|
||||||
env:
|
env:
|
||||||
DIFFUSERS_IS_CI: yes
|
DIFFUSERS_IS_CI: yes
|
||||||
HF_HOME: /mnt/cache
|
HF_HOME: /mnt/cache
|
||||||
OMP_NUM_THREADS: 8
|
OMP_NUM_THREADS: 8
|
||||||
MKL_NUM_THREADS: 8
|
MKL_NUM_THREADS: 8
|
||||||
HF_HUB_ENABLE_HF_TRANSFER: 1
|
HF_XET_HIGH_PERFORMANCE: 1
|
||||||
PYTEST_TIMEOUT: 600
|
PYTEST_TIMEOUT: 600
|
||||||
RUN_SLOW: no
|
RUN_SLOW: no
|
||||||
|
|
||||||
|
|||||||
156
.github/workflows/release_tests_fast.yml
vendored
156
.github/workflows/release_tests_fast.yml
vendored
@@ -32,8 +32,7 @@ jobs:
|
|||||||
fetch-depth: 2
|
fetch-depth: 2
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -62,7 +61,7 @@ jobs:
|
|||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --shm-size "16gb" --ipc host --gpus 0
|
options: --shm-size "16gb" --ipc host --gpus all
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -73,9 +72,8 @@ jobs:
|
|||||||
nvidia-smi
|
nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -85,7 +83,7 @@ jobs:
|
|||||||
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v -k "not Flax and not Onnx" \
|
-s -v -k "not Flax and not Onnx" \
|
||||||
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
|
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
|
||||||
tests/pipelines/${{ matrix.module }}
|
tests/pipelines/${{ matrix.module }}
|
||||||
@@ -107,7 +105,7 @@ jobs:
|
|||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --shm-size "16gb" --ipc host --gpus 0
|
options: --shm-size "16gb" --ipc host --gpus all
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -124,10 +122,9 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip install peft@git+https://github.com/huggingface/peft.git
|
||||||
python -m uv pip install peft@git+https://github.com/huggingface/peft.git
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
@@ -139,7 +136,7 @@ jobs:
|
|||||||
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v -k "not Flax and not Onnx" \
|
-s -v -k "not Flax and not Onnx" \
|
||||||
--make-reports=tests_torch_${{ matrix.module }}_cuda \
|
--make-reports=tests_torch_${{ matrix.module }}_cuda \
|
||||||
tests/${{ matrix.module }}
|
tests/${{ matrix.module }}
|
||||||
@@ -163,7 +160,7 @@ jobs:
|
|||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-minimum-cuda
|
image: diffusers/diffusers-pytorch-minimum-cuda
|
||||||
options: --shm-size "16gb" --ipc host --gpus 0
|
options: --shm-size "16gb" --ipc host --gpus all
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -175,10 +172,9 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip install peft@git+https://github.com/huggingface/peft.git
|
||||||
python -m uv pip install peft@git+https://github.com/huggingface/peft.git
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
@@ -190,7 +186,7 @@ jobs:
|
|||||||
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v -k "not Flax and not Onnx" \
|
-s -v -k "not Flax and not Onnx" \
|
||||||
--make-reports=tests_torch_minimum_cuda \
|
--make-reports=tests_torch_minimum_cuda \
|
||||||
tests/models/test_modeling_common.py \
|
tests/models/test_modeling_common.py \
|
||||||
@@ -213,101 +209,6 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
name: torch_minimum_version_cuda_test_reports
|
name: torch_minimum_version_cuda_test_reports
|
||||||
path: reports
|
path: reports
|
||||||
|
|
||||||
flax_tpu_tests:
|
|
||||||
name: Flax TPU Tests
|
|
||||||
runs-on: docker-tpu
|
|
||||||
container:
|
|
||||||
image: diffusers/diffusers-flax-tpu
|
|
||||||
options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --privileged
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
steps:
|
|
||||||
- name: Checkout diffusers
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 2
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: |
|
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
|
|
||||||
- name: Environment
|
|
||||||
run: |
|
|
||||||
python utils/print_env.py
|
|
||||||
|
|
||||||
- name: Run slow Flax TPU tests
|
|
||||||
env:
|
|
||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
|
||||||
run: |
|
|
||||||
python -m pytest -n 0 \
|
|
||||||
-s -v -k "Flax" \
|
|
||||||
--make-reports=tests_flax_tpu \
|
|
||||||
tests/
|
|
||||||
|
|
||||||
- name: Failure short reports
|
|
||||||
if: ${{ failure() }}
|
|
||||||
run: |
|
|
||||||
cat reports/tests_flax_tpu_stats.txt
|
|
||||||
cat reports/tests_flax_tpu_failures_short.txt
|
|
||||||
|
|
||||||
- name: Test suite reports artifacts
|
|
||||||
if: ${{ always() }}
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: flax_tpu_test_reports
|
|
||||||
path: reports
|
|
||||||
|
|
||||||
onnx_cuda_tests:
|
|
||||||
name: ONNX CUDA Tests
|
|
||||||
runs-on:
|
|
||||||
group: aws-g4dn-2xlarge
|
|
||||||
container:
|
|
||||||
image: diffusers/diffusers-onnxruntime-cuda
|
|
||||||
options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
steps:
|
|
||||||
- name: Checkout diffusers
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 2
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: |
|
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
|
|
||||||
- name: Environment
|
|
||||||
run: |
|
|
||||||
python utils/print_env.py
|
|
||||||
|
|
||||||
- name: Run slow ONNXRuntime CUDA tests
|
|
||||||
env:
|
|
||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
|
||||||
run: |
|
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
|
||||||
-s -v -k "Onnx" \
|
|
||||||
--make-reports=tests_onnx_cuda \
|
|
||||||
tests/
|
|
||||||
|
|
||||||
- name: Failure short reports
|
|
||||||
if: ${{ failure() }}
|
|
||||||
run: |
|
|
||||||
cat reports/tests_onnx_cuda_stats.txt
|
|
||||||
cat reports/tests_onnx_cuda_failures_short.txt
|
|
||||||
|
|
||||||
- name: Test suite reports artifacts
|
|
||||||
if: ${{ always() }}
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: onnx_cuda_test_reports
|
|
||||||
path: reports
|
|
||||||
|
|
||||||
run_torch_compile_tests:
|
run_torch_compile_tests:
|
||||||
name: PyTorch Compile CUDA tests
|
name: PyTorch Compile CUDA tests
|
||||||
@@ -316,8 +217,8 @@ jobs:
|
|||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
|
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-compile-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --gpus 0 --shm-size "16gb" --ipc host
|
options: --gpus all --shm-size "16gb" --ipc host
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
@@ -330,8 +231,7 @@ jobs:
|
|||||||
nvidia-smi
|
nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality,training]"
|
||||||
python -m uv pip install -e [quality,test,training]
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -340,7 +240,7 @@ jobs:
|
|||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
||||||
RUN_COMPILE: yes
|
RUN_COMPILE: yes
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
|
||||||
- name: Failure short reports
|
- name: Failure short reports
|
||||||
if: ${{ failure() }}
|
if: ${{ failure() }}
|
||||||
run: cat reports/tests_torch_compile_cuda_failures_short.txt
|
run: cat reports/tests_torch_compile_cuda_failures_short.txt
|
||||||
@@ -360,7 +260,7 @@ jobs:
|
|||||||
|
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-xformers-cuda
|
image: diffusers/diffusers-pytorch-xformers-cuda
|
||||||
options: --gpus 0 --shm-size "16gb" --ipc host
|
options: --gpus all --shm-size "16gb" --ipc host
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
@@ -373,8 +273,7 @@ jobs:
|
|||||||
nvidia-smi
|
nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality,training]"
|
||||||
python -m uv pip install -e [quality,test,training]
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -382,7 +281,7 @@ jobs:
|
|||||||
env:
|
env:
|
||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
|
||||||
- name: Failure short reports
|
- name: Failure short reports
|
||||||
if: ${{ failure() }}
|
if: ${{ failure() }}
|
||||||
run: cat reports/tests_torch_xformers_cuda_failures_short.txt
|
run: cat reports/tests_torch_xformers_cuda_failures_short.txt
|
||||||
@@ -402,7 +301,7 @@ jobs:
|
|||||||
|
|
||||||
container:
|
container:
|
||||||
image: diffusers/diffusers-pytorch-cuda
|
image: diffusers/diffusers-pytorch-cuda
|
||||||
options: --gpus 0 --shm-size "16gb" --ipc host
|
options: --gpus all --shm-size "16gb" --ipc host
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
@@ -416,21 +315,18 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality,training]"
|
||||||
python -m uv pip install -e [quality,test,training]
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
|
|
||||||
- name: Run example tests on GPU
|
- name: Run example tests on GPU
|
||||||
env:
|
env:
|
||||||
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install ".[training]"
|
||||||
python -m uv pip install timm
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
|
|
||||||
|
|
||||||
- name: Failure short reports
|
- name: Failure short reports
|
||||||
if: ${{ failure() }}
|
if: ${{ failure() }}
|
||||||
|
|||||||
7
.github/workflows/run_tests_from_a_pr.yml
vendored
7
.github/workflows/run_tests_from_a_pr.yml
vendored
@@ -30,7 +30,7 @@ jobs:
|
|||||||
group: aws-g4dn-2xlarge
|
group: aws-g4dn-2xlarge
|
||||||
container:
|
container:
|
||||||
image: ${{ github.event.inputs.docker_image }}
|
image: ${{ github.event.inputs.docker_image }}
|
||||||
options: --gpus 0 --privileged --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
options: --gpus all --privileged --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Validate test files input
|
- name: Validate test files input
|
||||||
@@ -63,9 +63,8 @@ jobs:
|
|||||||
|
|
||||||
- name: Install pytest
|
- name: Install pytest
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip install peft
|
||||||
python -m uv pip install peft
|
|
||||||
|
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
env:
|
env:
|
||||||
|
|||||||
2
.github/workflows/ssh-runner.yml
vendored
2
.github/workflows/ssh-runner.yml
vendored
@@ -31,7 +31,7 @@ jobs:
|
|||||||
group: "${{ github.event.inputs.runner_type }}"
|
group: "${{ github.event.inputs.runner_type }}"
|
||||||
container:
|
container:
|
||||||
image: ${{ github.event.inputs.docker_image }}
|
image: ${{ github.event.inputs.docker_image }}
|
||||||
options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0 --privileged
|
options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus all --privileged
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
|
|||||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -125,6 +125,9 @@ dmypy.json
|
|||||||
.vs
|
.vs
|
||||||
.vscode
|
.vscode
|
||||||
|
|
||||||
|
# Cursor
|
||||||
|
.cursor
|
||||||
|
|
||||||
# Pycharm
|
# Pycharm
|
||||||
.idea
|
.idea
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
12
README.md
12
README.md
@@ -37,7 +37,7 @@ limitations under the License.
|
|||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
We recommend installing 🤗 Diffusers in a virtual environment from PyPI or Conda. For more details about installing [PyTorch](https://pytorch.org/get-started/locally/) and [Flax](https://flax.readthedocs.io/en/latest/#installation), please refer to their official documentation.
|
We recommend installing 🤗 Diffusers in a virtual environment from PyPI or Conda. For more details about installing [PyTorch](https://pytorch.org/get-started/locally/), please refer to their official documentation.
|
||||||
|
|
||||||
### PyTorch
|
### PyTorch
|
||||||
|
|
||||||
@@ -53,14 +53,6 @@ With `conda` (maintained by the community):
|
|||||||
conda install -c conda-forge diffusers
|
conda install -c conda-forge diffusers
|
||||||
```
|
```
|
||||||
|
|
||||||
### Flax
|
|
||||||
|
|
||||||
With `pip` (official package):
|
|
||||||
|
|
||||||
```bash
|
|
||||||
pip install --upgrade diffusers[flax]
|
|
||||||
```
|
|
||||||
|
|
||||||
### Apple Silicon (M1/M2) support
|
### Apple Silicon (M1/M2) support
|
||||||
|
|
||||||
Please refer to the [How to use Stable Diffusion in Apple Silicon](https://huggingface.co/docs/diffusers/optimization/mps) guide.
|
Please refer to the [How to use Stable Diffusion in Apple Silicon](https://huggingface.co/docs/diffusers/optimization/mps) guide.
|
||||||
@@ -179,7 +171,7 @@ Also, say 👋 in our public Discord channel <a href="https://discord.gg/G7tWnz9
|
|||||||
<tr style="border-top: 2px solid black">
|
<tr style="border-top: 2px solid black">
|
||||||
<td>Text-guided Image Inpainting</td>
|
<td>Text-guided Image Inpainting</td>
|
||||||
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/inpaint">Stable Diffusion Inpainting</a></td>
|
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/inpaint">Stable Diffusion Inpainting</a></td>
|
||||||
<td><a href="https://huggingface.co/runwayml/stable-diffusion-inpainting"> runwayml/stable-diffusion-inpainting </a></td>
|
<td><a href="https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting"> stable-diffusion-v1-5/stable-diffusion-inpainting </a></td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr style="border-top: 2px solid black">
|
<tr style="border-top: 2px solid black">
|
||||||
<td>Image Variation</td>
|
<td>Image Variation</td>
|
||||||
|
|||||||
69
benchmarks/README.md
Normal file
69
benchmarks/README.md
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
# Diffusers Benchmarks
|
||||||
|
|
||||||
|
Welcome to Diffusers Benchmarks. These benchmarks are use to obtain latency and memory information of the most popular models across different scenarios such as:
|
||||||
|
|
||||||
|
* Base case i.e., when using `torch.bfloat16` and `torch.nn.functional.scaled_dot_product_attention`.
|
||||||
|
* Base + `torch.compile()`
|
||||||
|
* NF4 quantization
|
||||||
|
* Layerwise upcasting
|
||||||
|
|
||||||
|
Instead of full diffusion pipelines, only the forward pass of the respective model classes (such as `FluxTransformer2DModel`) is tested with the real checkpoints (such as `"black-forest-labs/FLUX.1-dev"`).
|
||||||
|
|
||||||
|
The entrypoint to running all the currently available benchmarks is in `run_all.py`. However, one can run the individual benchmarks, too, e.g., `python benchmarking_flux.py`. It should produce a CSV file containing various information about the benchmarks run.
|
||||||
|
|
||||||
|
The benchmarks are run on a weekly basis and the CI is defined in [benchmark.yml](../.github/workflows/benchmark.yml).
|
||||||
|
|
||||||
|
## Running the benchmarks manually
|
||||||
|
|
||||||
|
First set up `torch` and install `diffusers` from the root of the directory:
|
||||||
|
|
||||||
|
```py
|
||||||
|
pip install -e ".[quality,test]"
|
||||||
|
```
|
||||||
|
|
||||||
|
Then make sure the other dependencies are installed:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
cd benchmarks/
|
||||||
|
pip install -r requirements.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
We need to be authenticated to access some of the checkpoints used during benchmarking:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
hf auth login
|
||||||
|
```
|
||||||
|
|
||||||
|
We use an L40 GPU with 128GB RAM to run the benchmark CI. As such, the benchmarks are configured to run on NVIDIA GPUs. So, make sure you have access to a similar machine (or modify the benchmarking scripts accordingly).
|
||||||
|
|
||||||
|
Then you can either launch the entire benchmarking suite by running:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
python run_all.py
|
||||||
|
```
|
||||||
|
|
||||||
|
Or, you can run the individual benchmarks.
|
||||||
|
|
||||||
|
## Customizing the benchmarks
|
||||||
|
|
||||||
|
We define "scenarios" to cover the most common ways in which these models are used. You can
|
||||||
|
define a new scenario, modifying an existing benchmark file:
|
||||||
|
|
||||||
|
```py
|
||||||
|
BenchmarkScenario(
|
||||||
|
name=f"{CKPT_ID}-bnb-8bit",
|
||||||
|
model_cls=FluxTransformer2DModel,
|
||||||
|
model_init_kwargs={
|
||||||
|
"pretrained_model_name_or_path": CKPT_ID,
|
||||||
|
"torch_dtype": torch.bfloat16,
|
||||||
|
"subfolder": "transformer",
|
||||||
|
"quantization_config": BitsAndBytesConfig(load_in_8bit=True),
|
||||||
|
},
|
||||||
|
get_model_input_dict=partial(get_input_dict, device=torch_device, dtype=torch.bfloat16),
|
||||||
|
model_init_fn=model_init_fn,
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also configure a new model-level benchmark and add it to the existing suite. To do so, just defining a valid benchmarking file like `benchmarking_flux.py` should be enough.
|
||||||
|
|
||||||
|
Happy benchmarking 🧨
|
||||||
@@ -1,346 +0,0 @@
|
|||||||
import os
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import torch
|
|
||||||
|
|
||||||
from diffusers import (
|
|
||||||
AutoPipelineForImage2Image,
|
|
||||||
AutoPipelineForInpainting,
|
|
||||||
AutoPipelineForText2Image,
|
|
||||||
ControlNetModel,
|
|
||||||
LCMScheduler,
|
|
||||||
StableDiffusionAdapterPipeline,
|
|
||||||
StableDiffusionControlNetPipeline,
|
|
||||||
StableDiffusionXLAdapterPipeline,
|
|
||||||
StableDiffusionXLControlNetPipeline,
|
|
||||||
T2IAdapter,
|
|
||||||
WuerstchenCombinedPipeline,
|
|
||||||
)
|
|
||||||
from diffusers.utils import load_image
|
|
||||||
|
|
||||||
|
|
||||||
sys.path.append(".")
|
|
||||||
|
|
||||||
from utils import ( # noqa: E402
|
|
||||||
BASE_PATH,
|
|
||||||
PROMPT,
|
|
||||||
BenchmarkInfo,
|
|
||||||
benchmark_fn,
|
|
||||||
bytes_to_giga_bytes,
|
|
||||||
flush,
|
|
||||||
generate_csv_dict,
|
|
||||||
write_to_csv,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
RESOLUTION_MAPPING = {
|
|
||||||
"Lykon/DreamShaper": (512, 512),
|
|
||||||
"lllyasviel/sd-controlnet-canny": (512, 512),
|
|
||||||
"diffusers/controlnet-canny-sdxl-1.0": (1024, 1024),
|
|
||||||
"TencentARC/t2iadapter_canny_sd14v1": (512, 512),
|
|
||||||
"TencentARC/t2i-adapter-canny-sdxl-1.0": (1024, 1024),
|
|
||||||
"stabilityai/stable-diffusion-2-1": (768, 768),
|
|
||||||
"stabilityai/stable-diffusion-xl-base-1.0": (1024, 1024),
|
|
||||||
"stabilityai/stable-diffusion-xl-refiner-1.0": (1024, 1024),
|
|
||||||
"stabilityai/sdxl-turbo": (512, 512),
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class BaseBenchmak:
|
|
||||||
pipeline_class = None
|
|
||||||
|
|
||||||
def __init__(self, args):
|
|
||||||
super().__init__()
|
|
||||||
|
|
||||||
def run_inference(self, args):
|
|
||||||
raise NotImplementedError
|
|
||||||
|
|
||||||
def benchmark(self, args):
|
|
||||||
raise NotImplementedError
|
|
||||||
|
|
||||||
def get_result_filepath(self, args):
|
|
||||||
pipeline_class_name = str(self.pipe.__class__.__name__)
|
|
||||||
name = (
|
|
||||||
args.ckpt.replace("/", "_")
|
|
||||||
+ "_"
|
|
||||||
+ pipeline_class_name
|
|
||||||
+ f"-bs@{args.batch_size}-steps@{args.num_inference_steps}-mco@{args.model_cpu_offload}-compile@{args.run_compile}.csv"
|
|
||||||
)
|
|
||||||
filepath = os.path.join(BASE_PATH, name)
|
|
||||||
return filepath
|
|
||||||
|
|
||||||
|
|
||||||
class TextToImageBenchmark(BaseBenchmak):
|
|
||||||
pipeline_class = AutoPipelineForText2Image
|
|
||||||
|
|
||||||
def __init__(self, args):
|
|
||||||
pipe = self.pipeline_class.from_pretrained(args.ckpt, torch_dtype=torch.float16)
|
|
||||||
pipe = pipe.to("cuda")
|
|
||||||
|
|
||||||
if args.run_compile:
|
|
||||||
if not isinstance(pipe, WuerstchenCombinedPipeline):
|
|
||||||
pipe.unet.to(memory_format=torch.channels_last)
|
|
||||||
print("Run torch compile")
|
|
||||||
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
|
||||||
|
|
||||||
if hasattr(pipe, "movq") and getattr(pipe, "movq", None) is not None:
|
|
||||||
pipe.movq.to(memory_format=torch.channels_last)
|
|
||||||
pipe.movq = torch.compile(pipe.movq, mode="reduce-overhead", fullgraph=True)
|
|
||||||
else:
|
|
||||||
print("Run torch compile")
|
|
||||||
pipe.decoder = torch.compile(pipe.decoder, mode="reduce-overhead", fullgraph=True)
|
|
||||||
pipe.vqgan = torch.compile(pipe.vqgan, mode="reduce-overhead", fullgraph=True)
|
|
||||||
|
|
||||||
pipe.set_progress_bar_config(disable=True)
|
|
||||||
self.pipe = pipe
|
|
||||||
|
|
||||||
def run_inference(self, pipe, args):
|
|
||||||
_ = pipe(
|
|
||||||
prompt=PROMPT,
|
|
||||||
num_inference_steps=args.num_inference_steps,
|
|
||||||
num_images_per_prompt=args.batch_size,
|
|
||||||
)
|
|
||||||
|
|
||||||
def benchmark(self, args):
|
|
||||||
flush()
|
|
||||||
|
|
||||||
print(f"[INFO] {self.pipe.__class__.__name__}: Running benchmark with: {vars(args)}\n")
|
|
||||||
|
|
||||||
time = benchmark_fn(self.run_inference, self.pipe, args) # in seconds.
|
|
||||||
memory = bytes_to_giga_bytes(torch.cuda.max_memory_allocated()) # in GBs.
|
|
||||||
benchmark_info = BenchmarkInfo(time=time, memory=memory)
|
|
||||||
|
|
||||||
pipeline_class_name = str(self.pipe.__class__.__name__)
|
|
||||||
flush()
|
|
||||||
csv_dict = generate_csv_dict(
|
|
||||||
pipeline_cls=pipeline_class_name, ckpt=args.ckpt, args=args, benchmark_info=benchmark_info
|
|
||||||
)
|
|
||||||
filepath = self.get_result_filepath(args)
|
|
||||||
write_to_csv(filepath, csv_dict)
|
|
||||||
print(f"Logs written to: {filepath}")
|
|
||||||
flush()
|
|
||||||
|
|
||||||
|
|
||||||
class TurboTextToImageBenchmark(TextToImageBenchmark):
    """Text-to-image benchmark for turbo-style checkpoints.

    Identical to the parent except inference runs with guidance disabled
    (turbo models are distilled to work without classifier-free guidance).
    """

    def __init__(self, args):
        super().__init__(args)

    def run_inference(self, pipe, args):
        pipe(
            prompt=PROMPT,
            num_inference_steps=args.num_inference_steps,
            num_images_per_prompt=args.batch_size,
            guidance_scale=0.0,
        )
|
|
||||||
|
|
||||||
|
|
||||||
class LCMLoRATextToImageBenchmark(TextToImageBenchmark):
    # Benchmarks a pipeline with the LCM LoRA fused in and the LCM scheduler
    # swapped in (few-step inference with guidance_scale=1.0).
    lora_id = "latent-consistency/lcm-lora-sdxl"

    def __init__(self, args):
        super().__init__(args)
        # Fuse the LoRA into the base weights, then drop the separate LoRA
        # state so inference runs on the fused weights only.
        self.pipe.load_lora_weights(self.lora_id)
        self.pipe.fuse_lora()
        self.pipe.unload_lora_weights()
        self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)

    def get_result_filepath(self, args):
        # Prefix the result filename with the LoRA id so these runs don't
        # collide with the plain-checkpoint benchmarks.
        pipeline_class_name = str(self.pipe.__class__.__name__)
        name = (
            self.lora_id.replace("/", "_")
            + "_"
            + pipeline_class_name
            + f"-bs@{args.batch_size}-steps@{args.num_inference_steps}-mco@{args.model_cpu_offload}-compile@{args.run_compile}.csv"
        )
        filepath = os.path.join(BASE_PATH, name)
        return filepath

    def run_inference(self, pipe, args):
        # guidance_scale=1.0: LCM sampling does not use classifier-free
        # guidance batching.
        _ = pipe(
            prompt=PROMPT,
            num_inference_steps=args.num_inference_steps,
            num_images_per_prompt=args.batch_size,
            guidance_scale=1.0,
        )

    def benchmark(self, args):
        # Same flow as TextToImageBenchmark.benchmark, except the CSV records
        # self.lora_id (not args.ckpt) as the checkpoint.
        flush()

        print(f"[INFO] {self.pipe.__class__.__name__}: Running benchmark with: {vars(args)}\n")

        time = benchmark_fn(self.run_inference, self.pipe, args)  # in seconds.
        memory = bytes_to_giga_bytes(torch.cuda.max_memory_allocated())  # in GBs.
        benchmark_info = BenchmarkInfo(time=time, memory=memory)

        pipeline_class_name = str(self.pipe.__class__.__name__)
        flush()
        csv_dict = generate_csv_dict(
            pipeline_cls=pipeline_class_name, ckpt=self.lora_id, args=args, benchmark_info=benchmark_info
        )
        filepath = self.get_result_filepath(args)
        write_to_csv(filepath, csv_dict)
        print(f"Logs written to: {filepath}")
        flush()
|
|
||||||
|
|
||||||
|
|
||||||
class ImageToImageBenchmark(TextToImageBenchmark):
    """Benchmark for image-to-image pipelines using a fixed conditioning image."""

    pipeline_class = AutoPipelineForImage2Image
    url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/1665_Girl_with_a_Pearl_Earring.jpg"
    image = load_image(url).convert("RGB")

    def __init__(self, args):
        super().__init__(args)
        # Match the conditioning image to the checkpoint's native resolution.
        self.image = self.image.resize(RESOLUTION_MAPPING[args.ckpt])

    def run_inference(self, pipe, args):
        pipe(
            prompt=PROMPT,
            image=self.image,
            num_inference_steps=args.num_inference_steps,
            num_images_per_prompt=args.batch_size,
        )
|
|
||||||
|
|
||||||
|
|
||||||
class TurboImageToImageBenchmark(ImageToImageBenchmark):
    """Image-to-image benchmark for turbo checkpoints.

    Runs without classifier-free guidance (guidance_scale=0.0) and with a
    fixed denoising strength of 0.5. The pass-through ``__init__`` that only
    forwarded to the parent was redundant and has been removed.
    """

    def run_inference(self, pipe, args):
        _ = pipe(
            prompt=PROMPT,
            image=self.image,
            num_inference_steps=args.num_inference_steps,
            num_images_per_prompt=args.batch_size,
            guidance_scale=0.0,
            strength=0.5,
        )
|
|
||||||
|
|
||||||
|
|
||||||
class InpaintingBenchmark(ImageToImageBenchmark):
    """Inpainting benchmark: adds a mask image on top of the img2img setup."""

    pipeline_class = AutoPipelineForInpainting
    mask_url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/overture-creations-5sI6fQgYIuo_mask.png"
    mask = load_image(mask_url).convert("RGB")

    def __init__(self, args):
        # The parent __init__ already resizes self.image to the checkpoint
        # resolution; the duplicate image resize that used to follow here was
        # a no-op and has been removed. Only the mask needs resizing.
        super().__init__(args)
        self.mask = self.mask.resize(RESOLUTION_MAPPING[args.ckpt])

    def run_inference(self, pipe, args):
        _ = pipe(
            prompt=PROMPT,
            image=self.image,
            mask_image=self.mask,
            num_inference_steps=args.num_inference_steps,
            num_images_per_prompt=args.batch_size,
        )
|
|
||||||
|
|
||||||
|
|
||||||
class IPAdapterTextToImageBenchmark(TextToImageBenchmark):
    # Text-to-image benchmark with an IP-Adapter image prompt attached.
    url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png"
    image = load_image(url)

    def __init__(self, args):
        # NOTE: intentionally does not call super().__init__ — the pipeline is
        # built here so the IP-Adapter can be loaded before compile setup.
        # args.ip_adapter_id is a (repo_id, weight_name) tuple supplied by the
        # runner script.
        pipe = self.pipeline_class.from_pretrained(args.ckpt, torch_dtype=torch.float16).to("cuda")
        pipe.load_ip_adapter(
            args.ip_adapter_id[0],
            # SDXL adapter weights live in a dedicated subfolder of the repo.
            subfolder="models" if "sdxl" not in args.ip_adapter_id[1] else "sdxl_models",
            weight_name=args.ip_adapter_id[1],
        )

        if args.run_compile:
            pipe.unet.to(memory_format=torch.channels_last)
            print("Run torch compile")
            pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

        pipe.set_progress_bar_config(disable=True)
        self.pipe = pipe

    def run_inference(self, pipe, args):
        _ = pipe(
            prompt=PROMPT,
            ip_adapter_image=self.image,
            num_inference_steps=args.num_inference_steps,
            num_images_per_prompt=args.batch_size,
        )
|
|
||||||
|
|
||||||
|
|
||||||
class ControlNetBenchmark(TextToImageBenchmark):
    # Benchmarks a ControlNet-conditioned pipeline. `args.ckpt` selects the
    # ControlNet weights; the base model is fixed by `root_ckpt`.
    pipeline_class = StableDiffusionControlNetPipeline
    aux_network_class = ControlNetModel
    root_ckpt = "Lykon/DreamShaper"

    url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/canny_image_condition.png"
    image = load_image(url).convert("RGB")

    def __init__(self, args):
        # NOTE: does not call super().__init__ — the pipeline needs the aux
        # network passed at construction time.
        aux_network = self.aux_network_class.from_pretrained(args.ckpt, torch_dtype=torch.float16)
        pipe = self.pipeline_class.from_pretrained(self.root_ckpt, controlnet=aux_network, torch_dtype=torch.float16)
        pipe = pipe.to("cuda")

        pipe.set_progress_bar_config(disable=True)
        self.pipe = pipe

        if args.run_compile:
            # channels_last + reduce-overhead compile for UNet and ControlNet.
            pipe.unet.to(memory_format=torch.channels_last)
            pipe.controlnet.to(memory_format=torch.channels_last)

            print("Run torch compile")
            pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
            pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True)

        # Resize the conditioning image to the checkpoint's expected resolution.
        self.image = self.image.resize(RESOLUTION_MAPPING[args.ckpt])

    def run_inference(self, pipe, args):
        _ = pipe(
            prompt=PROMPT,
            image=self.image,
            num_inference_steps=args.num_inference_steps,
            num_images_per_prompt=args.batch_size,
        )
|
|
||||||
|
|
||||||
|
|
||||||
class ControlNetSDXLBenchmark(ControlNetBenchmark):
    """ControlNet benchmark against the SDXL base checkpoint.

    All setup and inference logic is inherited from ControlNetBenchmark; only
    the pipeline class and root checkpoint differ. The pass-through
    ``__init__`` that only forwarded to the parent was redundant and has been
    removed.
    """

    pipeline_class = StableDiffusionXLControlNetPipeline
    root_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
|
|
||||||
|
|
||||||
|
|
||||||
class T2IAdapterBenchmark(ControlNetBenchmark):
    # Same flow as ControlNetBenchmark, but the aux network is a T2I-Adapter
    # (passed via the `adapter` kwarg) and the conditioning image is
    # single-channel ("L").
    pipeline_class = StableDiffusionAdapterPipeline
    aux_network_class = T2IAdapter
    root_ckpt = "Lykon/DreamShaper"

    url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/canny_for_adapter.png"
    image = load_image(url).convert("L")

    def __init__(self, args):
        # NOTE: does not call super().__init__ — the adapter must be passed at
        # pipeline construction time.
        aux_network = self.aux_network_class.from_pretrained(args.ckpt, torch_dtype=torch.float16)
        pipe = self.pipeline_class.from_pretrained(self.root_ckpt, adapter=aux_network, torch_dtype=torch.float16)
        pipe = pipe.to("cuda")

        pipe.set_progress_bar_config(disable=True)
        self.pipe = pipe

        if args.run_compile:
            pipe.unet.to(memory_format=torch.channels_last)
            pipe.adapter.to(memory_format=torch.channels_last)

            print("Run torch compile")
            pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
            pipe.adapter = torch.compile(pipe.adapter, mode="reduce-overhead", fullgraph=True)

        # Resize the conditioning image to the checkpoint's expected resolution.
        self.image = self.image.resize(RESOLUTION_MAPPING[args.ckpt])
|
|
||||||
|
|
||||||
|
|
||||||
class T2IAdapterSDXLBenchmark(T2IAdapterBenchmark):
    """T2I-Adapter benchmark against SDXL.

    Only the pipeline class, root checkpoint, and conditioning image differ
    from the parent. The pass-through ``__init__`` that only forwarded to the
    parent was redundant and has been removed.
    """

    pipeline_class = StableDiffusionXLAdapterPipeline
    root_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"

    url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/canny_for_adapter_sdxl.png"
    # NOTE(review): unlike the parent, no .convert("L") here — presumably the
    # SDXL canny adapter expects a 3-channel image; confirm before changing.
    image = load_image(url)
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
import argparse
import sys


sys.path.append(".")
from base_classes import ControlNetBenchmark, ControlNetSDXLBenchmark  # noqa: E402


SD_CANNY = "lllyasviel/sd-controlnet-canny"
SDXL_CANNY = "diffusers/controlnet-canny-sdxl-1.0"


if __name__ == "__main__":
    # CLI for the ControlNet benchmarks; picks the SD or SDXL variant based on
    # the chosen ControlNet checkpoint.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ckpt", type=str, default=SD_CANNY, choices=[SD_CANNY, SDXL_CANNY])
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--num_inference_steps", type=int, default=50)
    parser.add_argument("--model_cpu_offload", action="store_true")
    parser.add_argument("--run_compile", action="store_true")
    args = parser.parse_args()

    if args.ckpt == SD_CANNY:
        benchmark_pipe = ControlNetBenchmark(args)
    else:
        benchmark_pipe = ControlNetSDXLBenchmark(args)
    benchmark_pipe.benchmark(args)
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
import argparse
import sys


sys.path.append(".")
from base_classes import IPAdapterTextToImageBenchmark  # noqa: E402


# Maps base checkpoint -> (IP-Adapter repo id, weight filename).
IP_ADAPTER_CKPTS = {
    # because original SD v1.5 has been taken down.
    "Lykon/DreamShaper": ("h94/IP-Adapter", "ip-adapter_sd15.bin"),
    "stabilityai/stable-diffusion-xl-base-1.0": ("h94/IP-Adapter", "ip-adapter_sdxl.bin"),
}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ckpt",
        type=str,
        # Fix: the default used to be "rstabilityai/stable-diffusion-xl-base-1.0"
        # (stray leading "r"), which is not in `choices` and not a key of
        # IP_ADAPTER_CKPTS, so running without --ckpt crashed.
        default="stabilityai/stable-diffusion-xl-base-1.0",
        choices=list(IP_ADAPTER_CKPTS.keys()),
    )
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--num_inference_steps", type=int, default=50)
    parser.add_argument("--model_cpu_offload", action="store_true")
    parser.add_argument("--run_compile", action="store_true")
    args = parser.parse_args()

    # Resolve the adapter pair for the chosen checkpoint, then tag the ckpt
    # name so IP-Adapter rows are distinguishable in the CSV output.
    args.ip_adapter_id = IP_ADAPTER_CKPTS[args.ckpt]
    benchmark_pipe = IPAdapterTextToImageBenchmark(args)
    args.ckpt = f"{args.ckpt} (IP-Adapter)"
    benchmark_pipe.benchmark(args)
|
|
||||||
@@ -1,29 +0,0 @@
|
|||||||
import argparse
import sys


sys.path.append(".")
from base_classes import ImageToImageBenchmark, TurboImageToImageBenchmark  # noqa: E402


if __name__ == "__main__":
    # CLI for the image-to-image benchmarks.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ckpt",
        type=str,
        default="Lykon/DreamShaper",
        choices=[
            "Lykon/DreamShaper",
            "stabilityai/stable-diffusion-2-1",
            "stabilityai/stable-diffusion-xl-refiner-1.0",
            "stabilityai/sdxl-turbo",
        ],
    )
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--num_inference_steps", type=int, default=50)
    parser.add_argument("--model_cpu_offload", action="store_true")
    parser.add_argument("--run_compile", action="store_true")
    args = parser.parse_args()

    # Turbo checkpoints run without guidance; that is handled by the subclass.
    if "turbo" in args.ckpt:
        benchmark_pipe = TurboImageToImageBenchmark(args)
    else:
        benchmark_pipe = ImageToImageBenchmark(args)
    benchmark_pipe.benchmark(args)
|
|
||||||
@@ -1,28 +0,0 @@
|
|||||||
import argparse
import sys


sys.path.append(".")
from base_classes import InpaintingBenchmark  # noqa: E402


if __name__ == "__main__":
    # CLI for the inpainting benchmarks.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ckpt",
        type=str,
        default="Lykon/DreamShaper",
        choices=[
            "Lykon/DreamShaper",
            "stabilityai/stable-diffusion-2-1",
            "stabilityai/stable-diffusion-xl-base-1.0",
        ],
    )
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--num_inference_steps", type=int, default=50)
    parser.add_argument("--model_cpu_offload", action="store_true")
    parser.add_argument("--run_compile", action="store_true")
    args = parser.parse_args()

    InpaintingBenchmark(args).benchmark(args)
|
|
||||||
@@ -1,28 +0,0 @@
|
|||||||
import argparse
import sys


sys.path.append(".")
from base_classes import T2IAdapterBenchmark, T2IAdapterSDXLBenchmark  # noqa: E402


SD14_ADAPTER = "TencentARC/t2iadapter_canny_sd14v1"


if __name__ == "__main__":
    # CLI for the T2I-Adapter benchmarks; picks the SD 1.4 or SDXL variant
    # based on the chosen adapter checkpoint.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ckpt",
        type=str,
        default=SD14_ADAPTER,
        choices=[SD14_ADAPTER, "TencentARC/t2i-adapter-canny-sdxl-1.0"],
    )
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--num_inference_steps", type=int, default=50)
    parser.add_argument("--model_cpu_offload", action="store_true")
    parser.add_argument("--run_compile", action="store_true")
    args = parser.parse_args()

    if args.ckpt == SD14_ADAPTER:
        benchmark_pipe = T2IAdapterBenchmark(args)
    else:
        benchmark_pipe = T2IAdapterSDXLBenchmark(args)
    benchmark_pipe.benchmark(args)
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
import argparse
import sys


sys.path.append(".")
from base_classes import LCMLoRATextToImageBenchmark  # noqa: E402


if __name__ == "__main__":
    # CLI for the LCM-LoRA benchmark (few-step sampling, hence 4 steps).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ckpt",
        type=str,
        default="stabilityai/stable-diffusion-xl-base-1.0",
    )
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--num_inference_steps", type=int, default=4)
    parser.add_argument("--model_cpu_offload", action="store_true")
    parser.add_argument("--run_compile", action="store_true")
    args = parser.parse_args()

    LCMLoRATextToImageBenchmark(args).benchmark(args)
|
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
import argparse
import sys


sys.path.append(".")
from base_classes import TextToImageBenchmark, TurboTextToImageBenchmark  # noqa: E402


ALL_T2I_CKPTS = [
    "Lykon/DreamShaper",
    "segmind/SSD-1B",
    "stabilityai/stable-diffusion-xl-base-1.0",
    "kandinsky-community/kandinsky-2-2-decoder",
    "warp-ai/wuerstchen",
    "stabilityai/sdxl-turbo",
]


if __name__ == "__main__":
    # CLI for the text-to-image benchmarks.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ckpt",
        type=str,
        default="Lykon/DreamShaper",
        choices=ALL_T2I_CKPTS,
    )
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--num_inference_steps", type=int, default=50)
    parser.add_argument("--model_cpu_offload", action="store_true")
    parser.add_argument("--run_compile", action="store_true")
    args = parser.parse_args()

    # Turbo checkpoints need guidance disabled; the turbo subclass handles it.
    benchmark_cls = TurboTextToImageBenchmark if "turbo" in args.ckpt else TextToImageBenchmark
    benchmark_pipe = benchmark_cls(args)
    benchmark_pipe.benchmark(args)
|
|
||||||
98
benchmarks/benchmarking_flux.py
Normal file
98
benchmarks/benchmarking_flux.py
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
from functools import partial
|
||||||
|
|
||||||
|
import torch
|
||||||
|
from benchmarking_utils import BenchmarkMixin, BenchmarkScenario, model_init_fn
|
||||||
|
|
||||||
|
from diffusers import BitsAndBytesConfig, FluxTransformer2DModel
|
||||||
|
from diffusers.utils.testing_utils import torch_device
|
||||||
|
|
||||||
|
|
||||||
|
CKPT_ID = "black-forest-labs/FLUX.1-dev"
|
||||||
|
RESULT_FILENAME = "flux.csv"
|
||||||
|
|
||||||
|
|
||||||
|
def get_input_dict(**device_dtype_kwargs):
    """Build dummy FLUX transformer inputs for a 1024x1024 generation.

    Keyword args (e.g. ``device``, ``dtype``) are forwarded to the tensor
    constructors.

    Returns:
        dict: keyword arguments for ``FluxTransformer2DModel.forward``.
    """
    # resolution: 1024x1024 -> 4096 packed image tokens of dim 64
    # maximum sequence length 512 (text tokens)
    hidden_states = torch.randn(1, 4096, 64, **device_dtype_kwargs)
    encoder_hidden_states = torch.randn(1, 512, 4096, **device_dtype_kwargs)
    pooled_prompt_embeds = torch.randn(1, 768, **device_dtype_kwargs)
    # Fix: img_ids must index the 4096 image tokens and txt_ids the 512 text
    # tokens — the original had the two shapes swapped. Numerically harmless
    # here (both are all-ones), but semantically wrong and misleading.
    image_ids = torch.ones(4096, 3, **device_dtype_kwargs)
    text_ids = torch.ones(512, 3, **device_dtype_kwargs)
    timestep = torch.tensor([1.0], **device_dtype_kwargs)
    guidance = torch.tensor([1.0], **device_dtype_kwargs)

    return {
        "hidden_states": hidden_states,
        "encoder_hidden_states": encoder_hidden_states,
        "img_ids": image_ids,
        "txt_ids": text_ids,
        "pooled_projections": pooled_prompt_embeds,
        "timestep": timestep,
        "guidance": guidance,
    }
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # `from_pretrained` kwargs shared by every scenario; each scenario gets
    # its own copy so a later mutation cannot leak across scenarios.
    base_init_kwargs = {
        "pretrained_model_name_or_path": CKPT_ID,
        "torch_dtype": torch.bfloat16,
        "subfolder": "transformer",
    }
    input_fn = partial(get_input_dict, device=torch_device, dtype=torch.bfloat16)

    scenarios = [
        # Plain bf16 run, additionally benchmarked under torch.compile.
        BenchmarkScenario(
            name=f"{CKPT_ID}-bf16",
            model_cls=FluxTransformer2DModel,
            model_init_kwargs=dict(base_init_kwargs),
            get_model_input_dict=input_fn,
            model_init_fn=model_init_fn,
            compile_kwargs={"fullgraph": True},
        ),
        # 4-bit NF4 quantization via bitsandbytes.
        BenchmarkScenario(
            name=f"{CKPT_ID}-bnb-nf4",
            model_cls=FluxTransformer2DModel,
            model_init_kwargs={
                **base_init_kwargs,
                "quantization_config": BitsAndBytesConfig(
                    load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type="nf4"
                ),
            },
            get_model_input_dict=input_fn,
            model_init_fn=model_init_fn,
        ),
        # fp8 weight storage with per-layer upcasting at compute time.
        BenchmarkScenario(
            name=f"{CKPT_ID}-layerwise-upcasting",
            model_cls=FluxTransformer2DModel,
            model_init_kwargs=dict(base_init_kwargs),
            get_model_input_dict=input_fn,
            model_init_fn=partial(model_init_fn, layerwise_upcasting=True),
        ),
        # Leaf-level group offloading with async CUDA streams.
        BenchmarkScenario(
            name=f"{CKPT_ID}-group-offload-leaf",
            model_cls=FluxTransformer2DModel,
            model_init_kwargs=dict(base_init_kwargs),
            get_model_input_dict=input_fn,
            model_init_fn=partial(
                model_init_fn,
                group_offload_kwargs={
                    "onload_device": torch_device,
                    "offload_device": torch.device("cpu"),
                    "offload_type": "leaf_level",
                    "use_stream": True,
                    "non_blocking": True,
                },
            ),
        ),
    ]

    runner = BenchmarkMixin()
    runner.run_bencmarks_and_collate(scenarios, filename=RESULT_FILENAME)
|
||||||
80
benchmarks/benchmarking_ltx.py
Normal file
80
benchmarks/benchmarking_ltx.py
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
from functools import partial
|
||||||
|
|
||||||
|
import torch
|
||||||
|
from benchmarking_utils import BenchmarkMixin, BenchmarkScenario, model_init_fn
|
||||||
|
|
||||||
|
from diffusers import LTXVideoTransformer3DModel
|
||||||
|
from diffusers.utils.testing_utils import torch_device
|
||||||
|
|
||||||
|
|
||||||
|
CKPT_ID = "Lightricks/LTX-Video-0.9.7-dev"
|
||||||
|
RESULT_FILENAME = "ltx.csv"
|
||||||
|
|
||||||
|
|
||||||
|
def get_input_dict(**device_dtype_kwargs):
    """Build dummy LTX-Video transformer inputs.

    Sized for a 512x704 video of 161 frames with ``max_sequence_length`` 256.
    Keyword args (e.g. ``device``, ``dtype``) are forwarded to the tensor
    constructors.
    """
    num_video_tokens = 7392
    return {
        "hidden_states": torch.randn(1, num_video_tokens, 128, **device_dtype_kwargs),
        "encoder_hidden_states": torch.randn(1, 256, 4096, **device_dtype_kwargs),
        "encoder_attention_mask": torch.ones(1, 256, **device_dtype_kwargs),
        "timestep": torch.tensor([1.0], **device_dtype_kwargs),
        "video_coords": torch.randn(1, 3, num_video_tokens, **device_dtype_kwargs),
    }
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # `from_pretrained` kwargs shared by every scenario; copied per scenario
    # so a later mutation cannot leak across scenarios.
    base_init_kwargs = {
        "pretrained_model_name_or_path": CKPT_ID,
        "torch_dtype": torch.bfloat16,
        "subfolder": "transformer",
    }
    input_fn = partial(get_input_dict, device=torch_device, dtype=torch.bfloat16)

    scenarios = [
        # Plain bf16 run, additionally benchmarked under torch.compile.
        BenchmarkScenario(
            name=f"{CKPT_ID}-bf16",
            model_cls=LTXVideoTransformer3DModel,
            model_init_kwargs=dict(base_init_kwargs),
            get_model_input_dict=input_fn,
            model_init_fn=model_init_fn,
            compile_kwargs={"fullgraph": True},
        ),
        # fp8 weight storage with per-layer upcasting at compute time.
        BenchmarkScenario(
            name=f"{CKPT_ID}-layerwise-upcasting",
            model_cls=LTXVideoTransformer3DModel,
            model_init_kwargs=dict(base_init_kwargs),
            get_model_input_dict=input_fn,
            model_init_fn=partial(model_init_fn, layerwise_upcasting=True),
        ),
        # Leaf-level group offloading with async CUDA streams.
        BenchmarkScenario(
            name=f"{CKPT_ID}-group-offload-leaf",
            model_cls=LTXVideoTransformer3DModel,
            model_init_kwargs=dict(base_init_kwargs),
            get_model_input_dict=input_fn,
            model_init_fn=partial(
                model_init_fn,
                group_offload_kwargs={
                    "onload_device": torch_device,
                    "offload_device": torch.device("cpu"),
                    "offload_type": "leaf_level",
                    "use_stream": True,
                    "non_blocking": True,
                },
            ),
        ),
    ]

    runner = BenchmarkMixin()
    runner.run_bencmarks_and_collate(scenarios, filename=RESULT_FILENAME)
|
||||||
82
benchmarks/benchmarking_sdxl.py
Normal file
82
benchmarks/benchmarking_sdxl.py
Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
from functools import partial
|
||||||
|
|
||||||
|
import torch
|
||||||
|
from benchmarking_utils import BenchmarkMixin, BenchmarkScenario, model_init_fn
|
||||||
|
|
||||||
|
from diffusers import UNet2DConditionModel
|
||||||
|
from diffusers.utils.testing_utils import torch_device
|
||||||
|
|
||||||
|
|
||||||
|
CKPT_ID = "stabilityai/stable-diffusion-xl-base-1.0"
|
||||||
|
RESULT_FILENAME = "sdxl.csv"
|
||||||
|
|
||||||
|
|
||||||
|
def get_input_dict(**device_dtype_kwargs):
    """Build dummy SDXL UNet inputs for a 1024x1024 image (77-token prompt).

    Keyword args (e.g. ``device``, ``dtype``) are forwarded to the tensor
    constructors.
    """
    sample = torch.randn(1, 4, 128, 128, **device_dtype_kwargs)
    prompt_embeds = torch.randn(1, 77, 2048, **device_dtype_kwargs)
    t = torch.tensor([1.0], **device_dtype_kwargs)
    # SDXL micro-conditioning: pooled text embeds + size/crop "time ids".
    micro_conds = {
        "text_embeds": torch.randn(1, 1280, **device_dtype_kwargs),
        "time_ids": torch.ones(1, 6, **device_dtype_kwargs),
    }

    return {
        "sample": sample,
        "encoder_hidden_states": prompt_embeds,
        "timestep": t,
        "added_cond_kwargs": micro_conds,
    }
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # `from_pretrained` kwargs shared by every scenario; copied per scenario
    # so a later mutation cannot leak across scenarios.
    base_init_kwargs = {
        "pretrained_model_name_or_path": CKPT_ID,
        "torch_dtype": torch.bfloat16,
        "subfolder": "unet",
    }
    input_fn = partial(get_input_dict, device=torch_device, dtype=torch.bfloat16)

    scenarios = [
        # Plain bf16 run, additionally benchmarked under torch.compile.
        BenchmarkScenario(
            name=f"{CKPT_ID}-bf16",
            model_cls=UNet2DConditionModel,
            model_init_kwargs=dict(base_init_kwargs),
            get_model_input_dict=input_fn,
            model_init_fn=model_init_fn,
            compile_kwargs={"fullgraph": True},
        ),
        # fp8 weight storage with per-layer upcasting at compute time.
        BenchmarkScenario(
            name=f"{CKPT_ID}-layerwise-upcasting",
            model_cls=UNet2DConditionModel,
            model_init_kwargs=dict(base_init_kwargs),
            get_model_input_dict=input_fn,
            model_init_fn=partial(model_init_fn, layerwise_upcasting=True),
        ),
        # Leaf-level group offloading with async CUDA streams.
        BenchmarkScenario(
            name=f"{CKPT_ID}-group-offload-leaf",
            model_cls=UNet2DConditionModel,
            model_init_kwargs=dict(base_init_kwargs),
            get_model_input_dict=input_fn,
            model_init_fn=partial(
                model_init_fn,
                group_offload_kwargs={
                    "onload_device": torch_device,
                    "offload_device": torch.device("cpu"),
                    "offload_type": "leaf_level",
                    "use_stream": True,
                    "non_blocking": True,
                },
            ),
        ),
    ]

    runner = BenchmarkMixin()
    runner.run_bencmarks_and_collate(scenarios, filename=RESULT_FILENAME)
|
||||||
244
benchmarks/benchmarking_utils.py
Normal file
244
benchmarks/benchmarking_utils.py
Normal file
@@ -0,0 +1,244 @@
|
|||||||
|
import gc
|
||||||
|
import inspect
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import queue
|
||||||
|
import threading
|
||||||
|
from contextlib import nullcontext
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Any, Callable, Dict, Optional, Union
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
import torch
|
||||||
|
import torch.utils.benchmark as benchmark
|
||||||
|
|
||||||
|
from diffusers.models.modeling_utils import ModelMixin
|
||||||
|
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
|
||||||
|
|
||||||
|
|
||||||
|
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(name)s: %(message)s")
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
NUM_WARMUP_ROUNDS = 5
|
||||||
|
|
||||||
|
|
||||||
|
def benchmark_fn(f, *args, **kwargs):
    """Time ``f(*args, **kwargs)`` with torch.utils.benchmark.

    Returns the mean wall-clock time in seconds, rounded to 3 decimals.
    """
    timer = benchmark.Timer(
        stmt="f(*args, **kwargs)",
        globals={"args": args, "kwargs": kwargs, "f": f},
        num_threads=1,
    )
    mean_seconds = timer.blocked_autorange().mean
    return float(f"{mean_seconds:.3f}")
|
||||||
|
|
||||||
|
|
||||||
|
def flush():
    # Release Python and CUDA caches and zero the peak-memory counters so the
    # next torch.cuda.max_memory_allocated() reading reflects only the
    # upcoming run.
    gc.collect()
    torch.cuda.empty_cache()
    # NOTE(review): reset_max_memory_allocated is deprecated in newer torch in
    # favor of reset_peak_memory_stats (also called below); kept as-is.
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
|
|
||||||
|
|
||||||
|
# Adapted from https://github.com/lucasb-eyer/cnn_vit_benchmarks/blob/15b665ff758e8062131353076153905cae00a71f/main.py
def calculate_flops(model, input_dict):
    """Estimate forward-pass FLOPs of `model` for the given inputs.

    Args:
        model: a torch.nn.Module whose `forward` accepts the keys of `input_dict`.
        input_dict: keyword arguments for `model.forward`.

    Returns:
        int: estimated FLOPs (2 * MACs).

    Raises:
        ModuleNotFoundError: if `torchprofile` is not installed.
    """
    try:
        from torchprofile import profile_macs
    except ModuleNotFoundError as e:
        # The original bare `raise` added no information; give an actionable hint.
        raise ModuleNotFoundError(
            "`torchprofile` is required to compute FLOPs; install it with `pip install torchprofile`."
        ) from e

    # This is a hacky way to convert the kwargs to args as `profile_macs` cries about kwargs:
    # bind the kwargs against forward's signature and pass them positionally.
    sig = inspect.signature(model.forward)
    param_names = [
        p.name
        for p in sig.parameters.values()
        if p.kind
        in (
            inspect.Parameter.POSITIONAL_ONLY,
            inspect.Parameter.POSITIONAL_OR_KEYWORD,
        )
        and p.name != "self"
    ]
    bound = sig.bind_partial(**input_dict)
    bound.apply_defaults()
    args = tuple(bound.arguments[name] for name in param_names)

    model.eval()
    with torch.no_grad():
        macs = profile_macs(model, args)
    flops = 2 * macs  # 1 MAC operation = 2 FLOPs (1 multiplication + 1 addition)
    return flops
||||||
|
|
||||||
|
|
||||||
|
def calculate_params(model):
    """Return the total element count of all parameters in `model`."""
    total = 0
    for p in model.parameters():
        total += p.numel()
    return total
|
||||||
|
|
||||||
|
|
||||||
|
# Users can define their own in case this doesn't suffice. For most cases,
# it should be sufficient.
def model_init_fn(model_cls, group_offload_kwargs=None, layerwise_upcasting=False, **init_kwargs):
    # Instantiate `model_cls` via `from_pretrained(**init_kwargs)` in eval
    # mode, then either enable group offloading (kwargs forwarded to
    # `enable_group_offload`) or move the model to `torch_device`.
    model = model_cls.from_pretrained(**init_kwargs).eval()
    if group_offload_kwargs and isinstance(group_offload_kwargs, dict):
        model.enable_group_offload(**group_offload_kwargs)
    else:
        model.to(torch_device)
    if layerwise_upcasting:
        # Store weights in fp8 and upcast per layer at compute time; the
        # compute dtype falls back to bf16 when `torch_dtype` isn't provided.
        model.enable_layerwise_casting(
            storage_dtype=torch.float8_e4m3fn, compute_dtype=init_kwargs.get("torch_dtype", torch.bfloat16)
        )
    return model
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class BenchmarkScenario:
    """One benchmark configuration: how to build a model and its inputs."""

    name: str  # human-readable scenario id used in logs/results
    model_cls: ModelMixin  # diffusers model class to benchmark
    model_init_kwargs: Dict[str, Any]  # kwargs for `from_pretrained`
    model_init_fn: Callable  # builds/places the model (see `model_init_fn`)
    get_model_input_dict: Callable  # returns forward-pass kwargs
    compile_kwargs: Optional[Dict[str, Any]] = None  # if set, also benchmark under torch.compile
|
||||||
|
|
||||||
|
|
||||||
|
@require_torch_gpu
|
||||||
|
class BenchmarkMixin:
|
||||||
|
def pre_benchmark(self):
    # Fresh slate before a measurement: clear caches/peak-memory counters and
    # drop any previously compiled graphs.
    flush()
    torch.compiler.reset()
|
|
||||||
|
def post_benchmark(self, model):
    # Move the model off the GPU before clearing caches so its memory is
    # actually released, then reset compiler state for the next scenario.
    model.cpu()
    flush()
    torch.compiler.reset()
|
|
||||||
|
@torch.no_grad()
|
||||||
|
def run_benchmark(self, scenario: BenchmarkScenario):
|
||||||
|
# 0) Basic stats
|
||||||
|
logger.info(f"Running scenario: {scenario.name}.")
|
||||||
|
try:
|
||||||
|
model = model_init_fn(scenario.model_cls, **scenario.model_init_kwargs)
|
||||||
|
num_params = round(calculate_params(model) / 1e9, 2)
|
||||||
|
try:
|
||||||
|
flops = round(calculate_flops(model, input_dict=scenario.get_model_input_dict()) / 1e9, 2)
|
||||||
|
except Exception as e:
|
||||||
|
logger.info(f"Problem in calculating FLOPs:\n{e}")
|
||||||
|
flops = None
|
||||||
|
model.cpu()
|
||||||
|
del model
|
||||||
|
except Exception as e:
|
||||||
|
logger.info(f"Error while initializing the model and calculating FLOPs:\n{e}")
|
||||||
|
return {}
|
||||||
|
self.pre_benchmark()
|
||||||
|
|
||||||
|
# 1) plain stats
|
||||||
|
results = {}
|
||||||
|
plain = None
|
||||||
|
try:
|
||||||
|
plain = self._run_phase(
|
||||||
|
model_cls=scenario.model_cls,
|
||||||
|
init_fn=scenario.model_init_fn,
|
||||||
|
init_kwargs=scenario.model_init_kwargs,
|
||||||
|
get_input_fn=scenario.get_model_input_dict,
|
||||||
|
compile_kwargs=None,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.info(f"Benchmark could not be run with the following error:\n{e}")
|
||||||
|
return results
|
||||||
|
|
||||||
|
# 2) compiled stats (if any)
|
||||||
|
compiled = {"time": None, "memory": None}
|
||||||
|
if scenario.compile_kwargs:
|
||||||
|
try:
|
||||||
|
compiled = self._run_phase(
|
||||||
|
model_cls=scenario.model_cls,
|
||||||
|
init_fn=scenario.model_init_fn,
|
||||||
|
init_kwargs=scenario.model_init_kwargs,
|
||||||
|
get_input_fn=scenario.get_model_input_dict,
|
||||||
|
compile_kwargs=scenario.compile_kwargs,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.info(f"Compilation benchmark could not be run with the following error\n: {e}")
|
||||||
|
if plain is None:
|
||||||
|
return results
|
||||||
|
|
||||||
|
# 3) merge
|
||||||
|
result = {
|
||||||
|
"scenario": scenario.name,
|
||||||
|
"model_cls": scenario.model_cls.__name__,
|
||||||
|
"num_params_B": num_params,
|
||||||
|
"flops_G": flops,
|
||||||
|
"time_plain_s": plain["time"],
|
||||||
|
"mem_plain_GB": plain["memory"],
|
||||||
|
"time_compile_s": compiled["time"],
|
||||||
|
"mem_compile_GB": compiled["memory"],
|
||||||
|
}
|
||||||
|
if scenario.compile_kwargs:
|
||||||
|
result["fullgraph"] = scenario.compile_kwargs.get("fullgraph", False)
|
||||||
|
result["mode"] = scenario.compile_kwargs.get("mode", "default")
|
||||||
|
else:
|
||||||
|
result["fullgraph"], result["mode"] = None, None
|
||||||
|
return result
|
||||||
|
|
||||||
|
def run_bencmarks_and_collate(self, scenarios: Union[BenchmarkScenario, list[BenchmarkScenario]], filename: str):
|
||||||
|
if not isinstance(scenarios, list):
|
||||||
|
scenarios = [scenarios]
|
||||||
|
record_queue = queue.Queue()
|
||||||
|
stop_signal = object()
|
||||||
|
|
||||||
|
def _writer_thread():
|
||||||
|
while True:
|
||||||
|
item = record_queue.get()
|
||||||
|
if item is stop_signal:
|
||||||
|
break
|
||||||
|
df_row = pd.DataFrame([item])
|
||||||
|
write_header = not os.path.exists(filename)
|
||||||
|
df_row.to_csv(filename, mode="a", header=write_header, index=False)
|
||||||
|
record_queue.task_done()
|
||||||
|
|
||||||
|
record_queue.task_done()
|
||||||
|
|
||||||
|
writer = threading.Thread(target=_writer_thread, daemon=True)
|
||||||
|
writer.start()
|
||||||
|
|
||||||
|
for s in scenarios:
|
||||||
|
try:
|
||||||
|
record = self.run_benchmark(s)
|
||||||
|
if record:
|
||||||
|
record_queue.put(record)
|
||||||
|
else:
|
||||||
|
logger.info(f"Record empty from scenario: {s.name}.")
|
||||||
|
except Exception as e:
|
||||||
|
logger.info(f"Running scenario ({s.name}) led to error:\n{e}")
|
||||||
|
record_queue.put(stop_signal)
|
||||||
|
logger.info(f"Results serialized to {filename=}.")
|
||||||
|
|
||||||
|
def _run_phase(
|
||||||
|
self,
|
||||||
|
*,
|
||||||
|
model_cls: ModelMixin,
|
||||||
|
init_fn: Callable,
|
||||||
|
init_kwargs: Dict[str, Any],
|
||||||
|
get_input_fn: Callable,
|
||||||
|
compile_kwargs: Optional[Dict[str, Any]],
|
||||||
|
) -> Dict[str, float]:
|
||||||
|
# setup
|
||||||
|
self.pre_benchmark()
|
||||||
|
|
||||||
|
# init & (optional) compile
|
||||||
|
model = init_fn(model_cls, **init_kwargs)
|
||||||
|
if compile_kwargs:
|
||||||
|
model.compile(**compile_kwargs)
|
||||||
|
|
||||||
|
# build inputs
|
||||||
|
inp = get_input_fn()
|
||||||
|
|
||||||
|
# measure
|
||||||
|
run_ctx = torch._inductor.utils.fresh_inductor_cache() if compile_kwargs else nullcontext()
|
||||||
|
with run_ctx:
|
||||||
|
for _ in range(NUM_WARMUP_ROUNDS):
|
||||||
|
_ = model(**inp)
|
||||||
|
time_s = benchmark_fn(lambda m, d: m(**d), model, inp)
|
||||||
|
mem_gb = torch.cuda.max_memory_allocated() / (1024**3)
|
||||||
|
mem_gb = round(mem_gb, 2)
|
||||||
|
|
||||||
|
# teardown
|
||||||
|
self.post_benchmark(model)
|
||||||
|
del model
|
||||||
|
return {"time": time_s, "memory": mem_gb}
|
||||||
74
benchmarks/benchmarking_wan.py
Normal file
74
benchmarks/benchmarking_wan.py
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
from functools import partial
|
||||||
|
|
||||||
|
import torch
|
||||||
|
from benchmarking_utils import BenchmarkMixin, BenchmarkScenario, model_init_fn
|
||||||
|
|
||||||
|
from diffusers import WanTransformer3DModel
|
||||||
|
from diffusers.utils.testing_utils import torch_device
|
||||||
|
|
||||||
|
|
||||||
|
CKPT_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
|
||||||
|
RESULT_FILENAME = "wan.csv"
|
||||||
|
|
||||||
|
|
||||||
|
def get_input_dict(**device_dtype_kwargs):
|
||||||
|
# height: 480
|
||||||
|
# width: 832
|
||||||
|
# num_frames: 81
|
||||||
|
# max_sequence_length: 512
|
||||||
|
hidden_states = torch.randn(1, 16, 21, 60, 104, **device_dtype_kwargs)
|
||||||
|
encoder_hidden_states = torch.randn(1, 512, 4096, **device_dtype_kwargs)
|
||||||
|
timestep = torch.tensor([1.0], **device_dtype_kwargs)
|
||||||
|
|
||||||
|
return {"hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "timestep": timestep}
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
scenarios = [
|
||||||
|
BenchmarkScenario(
|
||||||
|
name=f"{CKPT_ID}-bf16",
|
||||||
|
model_cls=WanTransformer3DModel,
|
||||||
|
model_init_kwargs={
|
||||||
|
"pretrained_model_name_or_path": CKPT_ID,
|
||||||
|
"torch_dtype": torch.bfloat16,
|
||||||
|
"subfolder": "transformer",
|
||||||
|
},
|
||||||
|
get_model_input_dict=partial(get_input_dict, device=torch_device, dtype=torch.bfloat16),
|
||||||
|
model_init_fn=model_init_fn,
|
||||||
|
compile_kwargs={"fullgraph": True},
|
||||||
|
),
|
||||||
|
BenchmarkScenario(
|
||||||
|
name=f"{CKPT_ID}-layerwise-upcasting",
|
||||||
|
model_cls=WanTransformer3DModel,
|
||||||
|
model_init_kwargs={
|
||||||
|
"pretrained_model_name_or_path": CKPT_ID,
|
||||||
|
"torch_dtype": torch.bfloat16,
|
||||||
|
"subfolder": "transformer",
|
||||||
|
},
|
||||||
|
get_model_input_dict=partial(get_input_dict, device=torch_device, dtype=torch.bfloat16),
|
||||||
|
model_init_fn=partial(model_init_fn, layerwise_upcasting=True),
|
||||||
|
),
|
||||||
|
BenchmarkScenario(
|
||||||
|
name=f"{CKPT_ID}-group-offload-leaf",
|
||||||
|
model_cls=WanTransformer3DModel,
|
||||||
|
model_init_kwargs={
|
||||||
|
"pretrained_model_name_or_path": CKPT_ID,
|
||||||
|
"torch_dtype": torch.bfloat16,
|
||||||
|
"subfolder": "transformer",
|
||||||
|
},
|
||||||
|
get_model_input_dict=partial(get_input_dict, device=torch_device, dtype=torch.bfloat16),
|
||||||
|
model_init_fn=partial(
|
||||||
|
model_init_fn,
|
||||||
|
group_offload_kwargs={
|
||||||
|
"onload_device": torch_device,
|
||||||
|
"offload_device": torch.device("cpu"),
|
||||||
|
"offload_type": "leaf_level",
|
||||||
|
"use_stream": True,
|
||||||
|
"non_blocking": True,
|
||||||
|
},
|
||||||
|
),
|
||||||
|
),
|
||||||
|
]
|
||||||
|
|
||||||
|
runner = BenchmarkMixin()
|
||||||
|
runner.run_bencmarks_and_collate(scenarios, filename=RESULT_FILENAME)
|
||||||
166
benchmarks/populate_into_db.py
Normal file
166
benchmarks/populate_into_db.py
Normal file
@@ -0,0 +1,166 @@
|
|||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import gpustat
|
||||||
|
import pandas as pd
|
||||||
|
import psycopg2
|
||||||
|
import psycopg2.extras
|
||||||
|
from psycopg2.extensions import register_adapter
|
||||||
|
from psycopg2.extras import Json
|
||||||
|
|
||||||
|
|
||||||
|
register_adapter(dict, Json)
|
||||||
|
|
||||||
|
FINAL_CSV_FILENAME = "collated_results.csv"
|
||||||
|
# https://github.com/huggingface/transformers/blob/593e29c5e2a9b17baec010e8dc7c1431fed6e841/benchmark/init_db.sql#L27
|
||||||
|
BENCHMARKS_TABLE_NAME = "benchmarks"
|
||||||
|
MEASUREMENTS_TABLE_NAME = "model_measurements"
|
||||||
|
|
||||||
|
|
||||||
|
def _init_benchmark(conn, branch, commit_id, commit_msg):
|
||||||
|
gpu_stats = gpustat.GPUStatCollection.new_query()
|
||||||
|
metadata = {"gpu_name": gpu_stats[0]["name"]}
|
||||||
|
repository = "huggingface/diffusers"
|
||||||
|
with conn.cursor() as cur:
|
||||||
|
cur.execute(
|
||||||
|
f"INSERT INTO {BENCHMARKS_TABLE_NAME} (repository, branch, commit_id, commit_message, metadata) VALUES (%s, %s, %s, %s, %s) RETURNING benchmark_id",
|
||||||
|
(repository, branch, commit_id, commit_msg, metadata),
|
||||||
|
)
|
||||||
|
benchmark_id = cur.fetchone()[0]
|
||||||
|
print(f"Initialised benchmark #{benchmark_id}")
|
||||||
|
return benchmark_id
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args():
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser.add_argument(
|
||||||
|
"branch",
|
||||||
|
type=str,
|
||||||
|
help="The branch name on which the benchmarking is performed.",
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"commit_id",
|
||||||
|
type=str,
|
||||||
|
help="The commit hash on which the benchmarking is performed.",
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"commit_msg",
|
||||||
|
type=str,
|
||||||
|
help="The commit message associated with the commit, truncated to 70 characters.",
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
return args
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
args = parse_args()
|
||||||
|
try:
|
||||||
|
conn = psycopg2.connect(
|
||||||
|
host=os.getenv("PGHOST"),
|
||||||
|
database=os.getenv("PGDATABASE"),
|
||||||
|
user=os.getenv("PGUSER"),
|
||||||
|
password=os.getenv("PGPASSWORD"),
|
||||||
|
)
|
||||||
|
print("DB connection established successfully.")
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Problem during DB init: {e}")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
try:
|
||||||
|
benchmark_id = _init_benchmark(
|
||||||
|
conn=conn,
|
||||||
|
branch=args.branch,
|
||||||
|
commit_id=args.commit_id,
|
||||||
|
commit_msg=args.commit_msg,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Problem during initializing benchmark: {e}")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
cur = conn.cursor()
|
||||||
|
|
||||||
|
df = pd.read_csv(FINAL_CSV_FILENAME)
|
||||||
|
|
||||||
|
# Helper to cast values (or None) given a dtype
|
||||||
|
def _cast_value(val, dtype: str):
|
||||||
|
if pd.isna(val):
|
||||||
|
return None
|
||||||
|
|
||||||
|
if dtype == "text":
|
||||||
|
return str(val).strip()
|
||||||
|
|
||||||
|
if dtype == "float":
|
||||||
|
try:
|
||||||
|
return float(val)
|
||||||
|
except ValueError:
|
||||||
|
return None
|
||||||
|
|
||||||
|
if dtype == "bool":
|
||||||
|
s = str(val).strip().lower()
|
||||||
|
if s in ("true", "t", "yes", "1"):
|
||||||
|
return True
|
||||||
|
if s in ("false", "f", "no", "0"):
|
||||||
|
return False
|
||||||
|
if val in (1, 1.0):
|
||||||
|
return True
|
||||||
|
if val in (0, 0.0):
|
||||||
|
return False
|
||||||
|
return None
|
||||||
|
|
||||||
|
return val
|
||||||
|
|
||||||
|
try:
|
||||||
|
rows_to_insert = []
|
||||||
|
for _, row in df.iterrows():
|
||||||
|
scenario = _cast_value(row.get("scenario"), "text")
|
||||||
|
model_cls = _cast_value(row.get("model_cls"), "text")
|
||||||
|
num_params_B = _cast_value(row.get("num_params_B"), "float")
|
||||||
|
flops_G = _cast_value(row.get("flops_G"), "float")
|
||||||
|
time_plain_s = _cast_value(row.get("time_plain_s"), "float")
|
||||||
|
mem_plain_GB = _cast_value(row.get("mem_plain_GB"), "float")
|
||||||
|
time_compile_s = _cast_value(row.get("time_compile_s"), "float")
|
||||||
|
mem_compile_GB = _cast_value(row.get("mem_compile_GB"), "float")
|
||||||
|
fullgraph = _cast_value(row.get("fullgraph"), "bool")
|
||||||
|
mode = _cast_value(row.get("mode"), "text")
|
||||||
|
|
||||||
|
# If "github_sha" column exists in the CSV, cast it; else default to None
|
||||||
|
if "github_sha" in df.columns:
|
||||||
|
github_sha = _cast_value(row.get("github_sha"), "text")
|
||||||
|
else:
|
||||||
|
github_sha = None
|
||||||
|
|
||||||
|
measurements = {
|
||||||
|
"scenario": scenario,
|
||||||
|
"model_cls": model_cls,
|
||||||
|
"num_params_B": num_params_B,
|
||||||
|
"flops_G": flops_G,
|
||||||
|
"time_plain_s": time_plain_s,
|
||||||
|
"mem_plain_GB": mem_plain_GB,
|
||||||
|
"time_compile_s": time_compile_s,
|
||||||
|
"mem_compile_GB": mem_compile_GB,
|
||||||
|
"fullgraph": fullgraph,
|
||||||
|
"mode": mode,
|
||||||
|
"github_sha": github_sha,
|
||||||
|
}
|
||||||
|
rows_to_insert.append((benchmark_id, measurements))
|
||||||
|
|
||||||
|
# Batch-insert all rows
|
||||||
|
insert_sql = f"""
|
||||||
|
INSERT INTO {MEASUREMENTS_TABLE_NAME} (
|
||||||
|
benchmark_id,
|
||||||
|
measurements
|
||||||
|
)
|
||||||
|
VALUES (%s, %s);
|
||||||
|
"""
|
||||||
|
|
||||||
|
psycopg2.extras.execute_batch(cur, insert_sql, rows_to_insert)
|
||||||
|
conn.commit()
|
||||||
|
|
||||||
|
cur.close()
|
||||||
|
conn.close()
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Exception: {e}")
|
||||||
|
sys.exit(1)
|
||||||
@@ -1,19 +1,19 @@
|
|||||||
import glob
|
import os
|
||||||
import sys
|
|
||||||
|
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
from huggingface_hub import hf_hub_download, upload_file
|
from huggingface_hub import hf_hub_download, upload_file
|
||||||
from huggingface_hub.utils import EntryNotFoundError
|
from huggingface_hub.utils import EntryNotFoundError
|
||||||
|
|
||||||
|
|
||||||
sys.path.append(".")
|
REPO_ID = "diffusers/benchmarks"
|
||||||
from utils import BASE_PATH, FINAL_CSV_FILE, GITHUB_SHA, REPO_ID, collate_csv # noqa: E402
|
|
||||||
|
|
||||||
|
|
||||||
def has_previous_benchmark() -> str:
|
def has_previous_benchmark() -> str:
|
||||||
|
from run_all import FINAL_CSV_FILENAME
|
||||||
|
|
||||||
csv_path = None
|
csv_path = None
|
||||||
try:
|
try:
|
||||||
csv_path = hf_hub_download(repo_id=REPO_ID, repo_type="dataset", filename=FINAL_CSV_FILE)
|
csv_path = hf_hub_download(repo_id=REPO_ID, repo_type="dataset", filename=FINAL_CSV_FILENAME)
|
||||||
except EntryNotFoundError:
|
except EntryNotFoundError:
|
||||||
csv_path = None
|
csv_path = None
|
||||||
return csv_path
|
return csv_path
|
||||||
@@ -26,46 +26,50 @@ def filter_float(value):
|
|||||||
|
|
||||||
|
|
||||||
def push_to_hf_dataset():
|
def push_to_hf_dataset():
|
||||||
all_csvs = sorted(glob.glob(f"{BASE_PATH}/*.csv"))
|
from run_all import FINAL_CSV_FILENAME, GITHUB_SHA
|
||||||
collate_csv(all_csvs, FINAL_CSV_FILE)
|
|
||||||
|
|
||||||
# If there's an existing benchmark file, we should report the changes.
|
|
||||||
csv_path = has_previous_benchmark()
|
csv_path = has_previous_benchmark()
|
||||||
if csv_path is not None:
|
if csv_path is not None:
|
||||||
current_results = pd.read_csv(FINAL_CSV_FILE)
|
current_results = pd.read_csv(FINAL_CSV_FILENAME)
|
||||||
previous_results = pd.read_csv(csv_path)
|
previous_results = pd.read_csv(csv_path)
|
||||||
|
|
||||||
numeric_columns = current_results.select_dtypes(include=["float64", "int64"]).columns
|
numeric_columns = current_results.select_dtypes(include=["float64", "int64"]).columns
|
||||||
numeric_columns = [
|
|
||||||
c for c in numeric_columns if c not in ["batch_size", "num_inference_steps", "actual_gpu_memory (gbs)"]
|
|
||||||
]
|
|
||||||
|
|
||||||
for column in numeric_columns:
|
for column in numeric_columns:
|
||||||
previous_results[column] = previous_results[column].map(lambda x: filter_float(x))
|
# get previous values as floats, aligned to current index
|
||||||
|
prev_vals = previous_results[column].map(filter_float).reindex(current_results.index)
|
||||||
|
|
||||||
# Calculate the percentage change
|
# get current values as floats
|
||||||
current_results[column] = current_results[column].astype(float)
|
curr_vals = current_results[column].astype(float)
|
||||||
previous_results[column] = previous_results[column].astype(float)
|
|
||||||
percent_change = ((current_results[column] - previous_results[column]) / previous_results[column]) * 100
|
|
||||||
|
|
||||||
# Format the values with '+' or '-' sign and append to original values
|
# stringify the current values
|
||||||
current_results[column] = current_results[column].map(str) + percent_change.map(
|
curr_str = curr_vals.map(str)
|
||||||
lambda x: f" ({'+' if x > 0 else ''}{x:.2f}%)"
|
|
||||||
|
# build an appendage only when prev exists and differs
|
||||||
|
append_str = prev_vals.where(prev_vals.notnull() & (prev_vals != curr_vals), other=pd.NA).map(
|
||||||
|
lambda x: f" ({x})" if pd.notnull(x) else ""
|
||||||
)
|
)
|
||||||
# There might be newly added rows. So, filter out the NaNs.
|
|
||||||
current_results[column] = current_results[column].map(lambda x: x.replace(" (nan%)", ""))
|
|
||||||
|
|
||||||
# Overwrite the current result file.
|
# combine
|
||||||
current_results.to_csv(FINAL_CSV_FILE, index=False)
|
current_results[column] = curr_str + append_str
|
||||||
|
os.remove(FINAL_CSV_FILENAME)
|
||||||
|
current_results.to_csv(FINAL_CSV_FILENAME, index=False)
|
||||||
|
|
||||||
commit_message = f"upload from sha: {GITHUB_SHA}" if GITHUB_SHA is not None else "upload benchmark results"
|
commit_message = f"upload from sha: {GITHUB_SHA}" if GITHUB_SHA is not None else "upload benchmark results"
|
||||||
upload_file(
|
upload_file(
|
||||||
repo_id=REPO_ID,
|
repo_id=REPO_ID,
|
||||||
path_in_repo=FINAL_CSV_FILE,
|
path_in_repo=FINAL_CSV_FILENAME,
|
||||||
path_or_fileobj=FINAL_CSV_FILE,
|
path_or_fileobj=FINAL_CSV_FILENAME,
|
||||||
repo_type="dataset",
|
repo_type="dataset",
|
||||||
commit_message=commit_message,
|
commit_message=commit_message,
|
||||||
)
|
)
|
||||||
|
upload_file(
|
||||||
|
repo_id="diffusers/benchmark-analyzer",
|
||||||
|
path_in_repo=FINAL_CSV_FILENAME,
|
||||||
|
path_or_fileobj=FINAL_CSV_FILENAME,
|
||||||
|
repo_type="space",
|
||||||
|
commit_message=commit_message,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|||||||
6
benchmarks/requirements.txt
Normal file
6
benchmarks/requirements.txt
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
pandas
|
||||||
|
psutil
|
||||||
|
gpustat
|
||||||
|
torchprofile
|
||||||
|
bitsandbytes
|
||||||
|
psycopg2==2.9.9
|
||||||
@@ -1,101 +1,84 @@
|
|||||||
import glob
|
import glob
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
|
||||||
from typing import List
|
import pandas as pd
|
||||||
|
|
||||||
|
|
||||||
sys.path.append(".")
|
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(name)s: %(message)s")
|
||||||
from benchmark_text_to_image import ALL_T2I_CKPTS # noqa: E402
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
PATTERN = "benchmarking_*.py"
|
||||||
PATTERN = "benchmark_*.py"
|
FINAL_CSV_FILENAME = "collated_results.csv"
|
||||||
|
GITHUB_SHA = os.getenv("GITHUB_SHA", None)
|
||||||
|
|
||||||
|
|
||||||
class SubprocessCallException(Exception):
|
class SubprocessCallException(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
# Taken from `test_examples_utils.py`
|
def run_command(command: list[str], return_stdout=False):
|
||||||
def run_command(command: List[str], return_stdout=False):
|
|
||||||
"""
|
|
||||||
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
|
|
||||||
if an error occurred while running `command`
|
|
||||||
"""
|
|
||||||
try:
|
try:
|
||||||
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
|
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
|
||||||
if return_stdout:
|
if return_stdout and hasattr(output, "decode"):
|
||||||
if hasattr(output, "decode"):
|
return output.decode("utf-8")
|
||||||
output = output.decode("utf-8")
|
|
||||||
return output
|
|
||||||
except subprocess.CalledProcessError as e:
|
except subprocess.CalledProcessError as e:
|
||||||
raise SubprocessCallException(
|
raise SubprocessCallException(f"Command `{' '.join(command)}` failed with:\n{e.output.decode()}") from e
|
||||||
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
|
|
||||||
) from e
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def merge_csvs(final_csv: str = "collated_results.csv"):
|
||||||
python_files = glob.glob(PATTERN)
|
all_csvs = glob.glob("*.csv")
|
||||||
|
all_csvs = [f for f in all_csvs if f != final_csv]
|
||||||
|
if not all_csvs:
|
||||||
|
logger.info("No result CSVs found to merge.")
|
||||||
|
return
|
||||||
|
|
||||||
for file in python_files:
|
df_list = []
|
||||||
print(f"****** Running file: {file} ******")
|
for f in all_csvs:
|
||||||
|
try:
|
||||||
# Run with canonical settings.
|
d = pd.read_csv(f)
|
||||||
if file != "benchmark_text_to_image.py" and file != "benchmark_ip_adapters.py":
|
except pd.errors.EmptyDataError:
|
||||||
command = f"python {file}"
|
# If a file existed but was zero‐bytes or corrupted, skip it
|
||||||
run_command(command.split())
|
|
||||||
|
|
||||||
command += " --run_compile"
|
|
||||||
run_command(command.split())
|
|
||||||
|
|
||||||
# Run variants.
|
|
||||||
for file in python_files:
|
|
||||||
# See: https://github.com/pytorch/pytorch/issues/129637
|
|
||||||
if file == "benchmark_ip_adapters.py":
|
|
||||||
continue
|
continue
|
||||||
|
df_list.append(d)
|
||||||
|
|
||||||
if file == "benchmark_text_to_image.py":
|
if not df_list:
|
||||||
for ckpt in ALL_T2I_CKPTS:
|
logger.info("All result CSVs were empty or invalid; nothing to merge.")
|
||||||
command = f"python {file} --ckpt {ckpt}"
|
return
|
||||||
|
|
||||||
if "turbo" in ckpt:
|
final_df = pd.concat(df_list, ignore_index=True)
|
||||||
command += " --num_inference_steps 1"
|
if GITHUB_SHA is not None:
|
||||||
|
final_df["github_sha"] = GITHUB_SHA
|
||||||
|
final_df.to_csv(final_csv, index=False)
|
||||||
|
logger.info(f"Merged {len(all_csvs)} partial CSVs → {final_csv}.")
|
||||||
|
|
||||||
run_command(command.split())
|
|
||||||
|
|
||||||
command += " --run_compile"
|
def run_scripts():
|
||||||
run_command(command.split())
|
python_files = sorted(glob.glob(PATTERN))
|
||||||
|
python_files = [f for f in python_files if f != "benchmarking_utils.py"]
|
||||||
|
|
||||||
elif file == "benchmark_sd_img.py":
|
for file in python_files:
|
||||||
for ckpt in ["stabilityai/stable-diffusion-xl-refiner-1.0", "stabilityai/sdxl-turbo"]:
|
script_name = file.split(".py")[0].split("_")[-1] # example: benchmarking_foo.py -> foo
|
||||||
command = f"python {file} --ckpt {ckpt}"
|
logger.info(f"\n****** Running file: {file} ******")
|
||||||
|
|
||||||
if ckpt == "stabilityai/sdxl-turbo":
|
partial_csv = f"{script_name}.csv"
|
||||||
command += " --num_inference_steps 2"
|
if os.path.exists(partial_csv):
|
||||||
|
logger.info(f"Found {partial_csv}. Removing for safer numbers and duplication.")
|
||||||
|
os.remove(partial_csv)
|
||||||
|
|
||||||
run_command(command.split())
|
command = ["python", file]
|
||||||
command += " --run_compile"
|
try:
|
||||||
run_command(command.split())
|
run_command(command)
|
||||||
|
logger.info(f"→ {file} finished normally.")
|
||||||
|
except SubprocessCallException as e:
|
||||||
|
logger.info(f"Error running {file}:\n{e}")
|
||||||
|
finally:
|
||||||
|
logger.info(f"→ Merging partial CSVs after {file} …")
|
||||||
|
merge_csvs(final_csv=FINAL_CSV_FILENAME)
|
||||||
|
|
||||||
elif file in ["benchmark_sd_inpainting.py", "benchmark_ip_adapters.py"]:
|
logger.info(f"\nAll scripts attempted. Final collated CSV: {FINAL_CSV_FILENAME}")
|
||||||
sdxl_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
|
|
||||||
command = f"python {file} --ckpt {sdxl_ckpt}"
|
|
||||||
run_command(command.split())
|
|
||||||
|
|
||||||
command += " --run_compile"
|
|
||||||
run_command(command.split())
|
|
||||||
|
|
||||||
elif file in ["benchmark_controlnet.py", "benchmark_t2i_adapter.py"]:
|
|
||||||
sdxl_ckpt = (
|
|
||||||
"diffusers/controlnet-canny-sdxl-1.0"
|
|
||||||
if "controlnet" in file
|
|
||||||
else "TencentARC/t2i-adapter-canny-sdxl-1.0"
|
|
||||||
)
|
|
||||||
command = f"python {file} --ckpt {sdxl_ckpt}"
|
|
||||||
run_command(command.split())
|
|
||||||
|
|
||||||
command += " --run_compile"
|
|
||||||
run_command(command.split())
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
run_scripts()
|
||||||
|
|||||||
@@ -1,98 +0,0 @@
|
|||||||
import argparse
|
|
||||||
import csv
|
|
||||||
import gc
|
|
||||||
import os
|
|
||||||
from dataclasses import dataclass
|
|
||||||
from typing import Dict, List, Union
|
|
||||||
|
|
||||||
import torch
|
|
||||||
import torch.utils.benchmark as benchmark
|
|
||||||
|
|
||||||
|
|
||||||
GITHUB_SHA = os.getenv("GITHUB_SHA", None)
|
|
||||||
BENCHMARK_FIELDS = [
|
|
||||||
"pipeline_cls",
|
|
||||||
"ckpt_id",
|
|
||||||
"batch_size",
|
|
||||||
"num_inference_steps",
|
|
||||||
"model_cpu_offload",
|
|
||||||
"run_compile",
|
|
||||||
"time (secs)",
|
|
||||||
"memory (gbs)",
|
|
||||||
"actual_gpu_memory (gbs)",
|
|
||||||
"github_sha",
|
|
||||||
]
|
|
||||||
|
|
||||||
PROMPT = "ghibli style, a fantasy landscape with castles"
|
|
||||||
BASE_PATH = os.getenv("BASE_PATH", ".")
|
|
||||||
TOTAL_GPU_MEMORY = float(os.getenv("TOTAL_GPU_MEMORY", torch.cuda.get_device_properties(0).total_memory / (1024**3)))
|
|
||||||
|
|
||||||
REPO_ID = "diffusers/benchmarks"
|
|
||||||
FINAL_CSV_FILE = "collated_results.csv"
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class BenchmarkInfo:
|
|
||||||
time: float
|
|
||||||
memory: float
|
|
||||||
|
|
||||||
|
|
||||||
def flush():
|
|
||||||
"""Wipes off memory."""
|
|
||||||
gc.collect()
|
|
||||||
torch.cuda.empty_cache()
|
|
||||||
torch.cuda.reset_max_memory_allocated()
|
|
||||||
torch.cuda.reset_peak_memory_stats()
|
|
||||||
|
|
||||||
|
|
||||||
def bytes_to_giga_bytes(bytes):
|
|
||||||
return f"{(bytes / 1024 / 1024 / 1024):.3f}"
|
|
||||||
|
|
||||||
|
|
||||||
def benchmark_fn(f, *args, **kwargs):
|
|
||||||
t0 = benchmark.Timer(
|
|
||||||
stmt="f(*args, **kwargs)",
|
|
||||||
globals={"args": args, "kwargs": kwargs, "f": f},
|
|
||||||
num_threads=torch.get_num_threads(),
|
|
||||||
)
|
|
||||||
return f"{(t0.blocked_autorange().mean):.3f}"
|
|
||||||
|
|
||||||
|
|
||||||
def generate_csv_dict(
|
|
||||||
pipeline_cls: str, ckpt: str, args: argparse.Namespace, benchmark_info: BenchmarkInfo
|
|
||||||
) -> Dict[str, Union[str, bool, float]]:
|
|
||||||
"""Packs benchmarking data into a dictionary for latter serialization."""
|
|
||||||
data_dict = {
|
|
||||||
"pipeline_cls": pipeline_cls,
|
|
||||||
"ckpt_id": ckpt,
|
|
||||||
"batch_size": args.batch_size,
|
|
||||||
"num_inference_steps": args.num_inference_steps,
|
|
||||||
"model_cpu_offload": args.model_cpu_offload,
|
|
||||||
"run_compile": args.run_compile,
|
|
||||||
"time (secs)": benchmark_info.time,
|
|
||||||
"memory (gbs)": benchmark_info.memory,
|
|
||||||
"actual_gpu_memory (gbs)": f"{(TOTAL_GPU_MEMORY):.3f}",
|
|
||||||
"github_sha": GITHUB_SHA,
|
|
||||||
}
|
|
||||||
return data_dict
|
|
||||||
|
|
||||||
|
|
||||||
def write_to_csv(file_name: str, data_dict: Dict[str, Union[str, bool, float]]):
|
|
||||||
"""Serializes a dictionary into a CSV file."""
|
|
||||||
with open(file_name, mode="w", newline="") as csvfile:
|
|
||||||
writer = csv.DictWriter(csvfile, fieldnames=BENCHMARK_FIELDS)
|
|
||||||
writer.writeheader()
|
|
||||||
writer.writerow(data_dict)
|
|
||||||
|
|
||||||
|
|
||||||
def collate_csv(input_files: List[str], output_file: str):
|
|
||||||
"""Collates multiple identically structured CSVs into a single CSV file."""
|
|
||||||
with open(output_file, mode="w", newline="") as outfile:
|
|
||||||
writer = csv.DictWriter(outfile, fieldnames=BENCHMARK_FIELDS)
|
|
||||||
writer.writeheader()
|
|
||||||
|
|
||||||
for file in input_files:
|
|
||||||
with open(file, mode="r") as infile:
|
|
||||||
reader = csv.DictReader(infile)
|
|
||||||
for row in reader:
|
|
||||||
writer.writerow(row)
|
|
||||||
@@ -1,52 +1,45 @@
|
|||||||
FROM ubuntu:20.04
|
FROM python:3.10-slim
|
||||||
|
ENV PYTHONDONTWRITEBYTECODE=1
|
||||||
LABEL maintainer="Hugging Face"
|
LABEL maintainer="Hugging Face"
|
||||||
LABEL repository="diffusers"
|
LABEL repository="diffusers"
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
RUN apt-get -y update \
|
RUN apt-get -y update && apt-get install -y bash \
|
||||||
&& apt-get install -y software-properties-common \
|
build-essential \
|
||||||
&& add-apt-repository ppa:deadsnakes/ppa
|
git \
|
||||||
|
git-lfs \
|
||||||
|
curl \
|
||||||
|
ca-certificates \
|
||||||
|
libglib2.0-0 \
|
||||||
|
libsndfile1-dev \
|
||||||
|
libgl1 \
|
||||||
|
zip \
|
||||||
|
wget
|
||||||
|
|
||||||
RUN apt install -y bash \
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
build-essential \
|
|
||||||
git \
|
|
||||||
git-lfs \
|
|
||||||
curl \
|
|
||||||
ca-certificates \
|
|
||||||
libsndfile1-dev \
|
|
||||||
python3.10 \
|
|
||||||
python3-pip \
|
|
||||||
libgl1 \
|
|
||||||
zip \
|
|
||||||
wget \
|
|
||||||
python3.10-venv && \
|
|
||||||
rm -rf /var/lib/apt/lists
|
|
||||||
|
|
||||||
# make sure to use venv
|
|
||||||
RUN python3.10 -m venv /opt/venv
|
|
||||||
ENV PATH="/opt/venv/bin:$PATH"
|
|
||||||
|
|
||||||
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
||||||
RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
|
RUN pip install uv
|
||||||
python3.10 -m uv pip install --no-cache-dir \
|
RUN uv pip install --no-cache-dir \
|
||||||
torch \
|
torch \
|
||||||
torchvision \
|
torchvision \
|
||||||
torchaudio \
|
torchaudio \
|
||||||
invisible_watermark \
|
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||||
--extra-index-url https://download.pytorch.org/whl/cpu && \
|
|
||||||
python3.10 -m uv pip install --no-cache-dir \
|
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/diffusers.git@main#egg=diffusers[test]"
|
||||||
accelerate \
|
|
||||||
datasets \
|
# Extra dependencies
|
||||||
hf-doc-builder \
|
RUN uv pip install --no-cache-dir \
|
||||||
huggingface-hub \
|
accelerate \
|
||||||
Jinja2 \
|
numpy==1.26.4 \
|
||||||
librosa \
|
hf_xet \
|
||||||
numpy==1.26.4 \
|
setuptools==69.5.1 \
|
||||||
scipy \
|
bitsandbytes \
|
||||||
tensorboard \
|
torchao \
|
||||||
transformers \
|
gguf \
|
||||||
matplotlib \
|
optimum-quanto
|
||||||
setuptools==69.5.1
|
|
||||||
|
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
||||||
|
|
||||||
CMD ["/bin/bash"]
|
CMD ["/bin/bash"]
|
||||||
|
|||||||
@@ -1,49 +0,0 @@
|
|||||||
FROM ubuntu:20.04
|
|
||||||
LABEL maintainer="Hugging Face"
|
|
||||||
LABEL repository="diffusers"
|
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive
|
|
||||||
|
|
||||||
RUN apt-get -y update \
|
|
||||||
&& apt-get install -y software-properties-common \
|
|
||||||
&& add-apt-repository ppa:deadsnakes/ppa
|
|
||||||
|
|
||||||
RUN apt install -y bash \
|
|
||||||
build-essential \
|
|
||||||
git \
|
|
||||||
git-lfs \
|
|
||||||
curl \
|
|
||||||
ca-certificates \
|
|
||||||
libsndfile1-dev \
|
|
||||||
libgl1 \
|
|
||||||
python3.10 \
|
|
||||||
python3-pip \
|
|
||||||
python3.10-venv && \
|
|
||||||
rm -rf /var/lib/apt/lists
|
|
||||||
|
|
||||||
# make sure to use venv
|
|
||||||
RUN python3.10 -m venv /opt/venv
|
|
||||||
ENV PATH="/opt/venv/bin:$PATH"
|
|
||||||
|
|
||||||
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
|
||||||
# follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container
|
|
||||||
RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
|
|
||||||
python3 -m uv pip install --upgrade --no-cache-dir \
|
|
||||||
clu \
|
|
||||||
"jax[cpu]>=0.2.16,!=0.3.2" \
|
|
||||||
"flax>=0.4.1" \
|
|
||||||
"jaxlib>=0.1.65" && \
|
|
||||||
python3 -m uv pip install --no-cache-dir \
|
|
||||||
accelerate \
|
|
||||||
datasets \
|
|
||||||
hf-doc-builder \
|
|
||||||
huggingface-hub \
|
|
||||||
Jinja2 \
|
|
||||||
librosa \
|
|
||||||
numpy==1.26.4 \
|
|
||||||
scipy \
|
|
||||||
tensorboard \
|
|
||||||
transformers \
|
|
||||||
hf_transfer
|
|
||||||
|
|
||||||
CMD ["/bin/bash"]
|
|
||||||
@@ -1,51 +0,0 @@
|
|||||||
FROM ubuntu:20.04
|
|
||||||
LABEL maintainer="Hugging Face"
|
|
||||||
LABEL repository="diffusers"
|
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive
|
|
||||||
|
|
||||||
RUN apt-get -y update \
|
|
||||||
&& apt-get install -y software-properties-common \
|
|
||||||
&& add-apt-repository ppa:deadsnakes/ppa
|
|
||||||
|
|
||||||
RUN apt install -y bash \
|
|
||||||
build-essential \
|
|
||||||
git \
|
|
||||||
git-lfs \
|
|
||||||
curl \
|
|
||||||
ca-certificates \
|
|
||||||
libsndfile1-dev \
|
|
||||||
libgl1 \
|
|
||||||
python3.10 \
|
|
||||||
python3-pip \
|
|
||||||
python3.10-venv && \
|
|
||||||
rm -rf /var/lib/apt/lists
|
|
||||||
|
|
||||||
# make sure to use venv
|
|
||||||
RUN python3.10 -m venv /opt/venv
|
|
||||||
ENV PATH="/opt/venv/bin:$PATH"
|
|
||||||
|
|
||||||
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
|
||||||
# follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container
|
|
||||||
RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
|
|
||||||
python3 -m pip install --no-cache-dir \
|
|
||||||
"jax[tpu]>=0.2.16,!=0.3.2" \
|
|
||||||
-f https://storage.googleapis.com/jax-releases/libtpu_releases.html && \
|
|
||||||
python3 -m uv pip install --upgrade --no-cache-dir \
|
|
||||||
clu \
|
|
||||||
"flax>=0.4.1" \
|
|
||||||
"jaxlib>=0.1.65" && \
|
|
||||||
python3 -m uv pip install --no-cache-dir \
|
|
||||||
accelerate \
|
|
||||||
datasets \
|
|
||||||
hf-doc-builder \
|
|
||||||
huggingface-hub \
|
|
||||||
Jinja2 \
|
|
||||||
librosa \
|
|
||||||
numpy==1.26.4 \
|
|
||||||
scipy \
|
|
||||||
tensorboard \
|
|
||||||
transformers \
|
|
||||||
hf_transfer
|
|
||||||
|
|
||||||
CMD ["/bin/bash"]
|
|
||||||
@@ -44,6 +44,6 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
|
|||||||
scipy \
|
scipy \
|
||||||
tensorboard \
|
tensorboard \
|
||||||
transformers \
|
transformers \
|
||||||
hf_transfer
|
hf_xet
|
||||||
|
|
||||||
CMD ["/bin/bash"]
|
CMD ["/bin/bash"]
|
||||||
@@ -38,13 +38,12 @@ RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
|
|||||||
datasets \
|
datasets \
|
||||||
hf-doc-builder \
|
hf-doc-builder \
|
||||||
huggingface-hub \
|
huggingface-hub \
|
||||||
hf_transfer \
|
hf_xet \
|
||||||
Jinja2 \
|
Jinja2 \
|
||||||
librosa \
|
librosa \
|
||||||
numpy==1.26.4 \
|
numpy==1.26.4 \
|
||||||
scipy \
|
scipy \
|
||||||
tensorboard \
|
tensorboard \
|
||||||
transformers \
|
transformers
|
||||||
hf_transfer
|
|
||||||
|
|
||||||
CMD ["/bin/bash"]
|
CMD ["/bin/bash"]
|
||||||
@@ -1,50 +0,0 @@
|
|||||||
FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
|
|
||||||
LABEL maintainer="Hugging Face"
|
|
||||||
LABEL repository="diffusers"
|
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive
|
|
||||||
|
|
||||||
RUN apt-get -y update \
|
|
||||||
&& apt-get install -y software-properties-common \
|
|
||||||
&& add-apt-repository ppa:deadsnakes/ppa
|
|
||||||
|
|
||||||
RUN apt install -y bash \
|
|
||||||
build-essential \
|
|
||||||
git \
|
|
||||||
git-lfs \
|
|
||||||
curl \
|
|
||||||
ca-certificates \
|
|
||||||
libsndfile1-dev \
|
|
||||||
libgl1 \
|
|
||||||
python3.10 \
|
|
||||||
python3.10-dev \
|
|
||||||
python3-pip \
|
|
||||||
python3.10-venv && \
|
|
||||||
rm -rf /var/lib/apt/lists
|
|
||||||
|
|
||||||
# make sure to use venv
|
|
||||||
RUN python3.10 -m venv /opt/venv
|
|
||||||
ENV PATH="/opt/venv/bin:$PATH"
|
|
||||||
|
|
||||||
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
|
||||||
RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
|
|
||||||
python3.10 -m uv pip install --no-cache-dir \
|
|
||||||
torch \
|
|
||||||
torchvision \
|
|
||||||
torchaudio \
|
|
||||||
invisible_watermark && \
|
|
||||||
python3.10 -m pip install --no-cache-dir \
|
|
||||||
accelerate \
|
|
||||||
datasets \
|
|
||||||
hf-doc-builder \
|
|
||||||
huggingface-hub \
|
|
||||||
hf_transfer \
|
|
||||||
Jinja2 \
|
|
||||||
librosa \
|
|
||||||
numpy==1.26.4 \
|
|
||||||
scipy \
|
|
||||||
tensorboard \
|
|
||||||
transformers \
|
|
||||||
hf_transfer
|
|
||||||
|
|
||||||
CMD ["/bin/bash"]
|
|
||||||
@@ -1,50 +1,38 @@
|
|||||||
FROM ubuntu:20.04
|
FROM python:3.10-slim
|
||||||
|
ENV PYTHONDONTWRITEBYTECODE=1
|
||||||
LABEL maintainer="Hugging Face"
|
LABEL maintainer="Hugging Face"
|
||||||
LABEL repository="diffusers"
|
LABEL repository="diffusers"
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
RUN apt-get -y update \
|
RUN apt-get -y update && apt-get install -y bash \
|
||||||
&& apt-get install -y software-properties-common \
|
build-essential \
|
||||||
&& add-apt-repository ppa:deadsnakes/ppa
|
git \
|
||||||
|
git-lfs \
|
||||||
|
curl \
|
||||||
|
ca-certificates \
|
||||||
|
libglib2.0-0 \
|
||||||
|
libsndfile1-dev \
|
||||||
|
libgl1
|
||||||
|
|
||||||
RUN apt install -y bash \
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
build-essential \
|
|
||||||
git \
|
|
||||||
git-lfs \
|
|
||||||
curl \
|
|
||||||
ca-certificates \
|
|
||||||
libsndfile1-dev \
|
|
||||||
python3.10 \
|
|
||||||
python3.10-dev \
|
|
||||||
python3-pip \
|
|
||||||
libgl1 \
|
|
||||||
python3.10-venv && \
|
|
||||||
rm -rf /var/lib/apt/lists
|
|
||||||
|
|
||||||
# make sure to use venv
|
|
||||||
RUN python3.10 -m venv /opt/venv
|
|
||||||
ENV PATH="/opt/venv/bin:$PATH"
|
|
||||||
|
|
||||||
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
||||||
RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
|
RUN pip install uv
|
||||||
python3.10 -m uv pip install --no-cache-dir \
|
RUN uv pip install --no-cache-dir \
|
||||||
torch \
|
torch \
|
||||||
torchvision \
|
torchvision \
|
||||||
torchaudio \
|
torchaudio \
|
||||||
invisible_watermark \
|
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||||
--extra-index-url https://download.pytorch.org/whl/cpu && \
|
|
||||||
python3.10 -m uv pip install --no-cache-dir \
|
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/diffusers.git@main#egg=diffusers[test]"
|
||||||
accelerate \
|
|
||||||
datasets \
|
# Extra dependencies
|
||||||
hf-doc-builder \
|
RUN uv pip install --no-cache-dir \
|
||||||
huggingface-hub \
|
accelerate \
|
||||||
Jinja2 \
|
numpy==1.26.4 \
|
||||||
librosa \
|
hf_xet
|
||||||
numpy==1.26.4 \
|
|
||||||
scipy \
|
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
||||||
tensorboard \
|
|
||||||
transformers matplotlib \
|
|
||||||
hf_transfer
|
|
||||||
|
|
||||||
CMD ["/bin/bash"]
|
CMD ["/bin/bash"]
|
||||||
|
|||||||
@@ -2,11 +2,13 @@ FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
|
|||||||
LABEL maintainer="Hugging Face"
|
LABEL maintainer="Hugging Face"
|
||||||
LABEL repository="diffusers"
|
LABEL repository="diffusers"
|
||||||
|
|
||||||
|
ARG PYTHON_VERSION=3.12
|
||||||
ENV DEBIAN_FRONTEND=noninteractive
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
RUN apt-get -y update \
|
RUN apt-get -y update \
|
||||||
&& apt-get install -y software-properties-common \
|
&& apt-get install -y software-properties-common \
|
||||||
&& add-apt-repository ppa:deadsnakes/ppa
|
&& add-apt-repository ppa:deadsnakes/ppa && \
|
||||||
|
apt-get update
|
||||||
|
|
||||||
RUN apt install -y bash \
|
RUN apt install -y bash \
|
||||||
build-essential \
|
build-essential \
|
||||||
@@ -14,38 +16,34 @@ RUN apt install -y bash \
|
|||||||
git-lfs \
|
git-lfs \
|
||||||
curl \
|
curl \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
|
libglib2.0-0 \
|
||||||
libsndfile1-dev \
|
libsndfile1-dev \
|
||||||
libgl1 \
|
libgl1 \
|
||||||
python3.10 \
|
python3 \
|
||||||
python3.10-dev \
|
|
||||||
python3-pip \
|
python3-pip \
|
||||||
python3.10-venv && \
|
&& apt-get clean \
|
||||||
rm -rf /var/lib/apt/lists
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# make sure to use venv
|
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||||
RUN python3.10 -m venv /opt/venv
|
ENV PATH="/root/.local/bin:$PATH"
|
||||||
ENV PATH="/opt/venv/bin:$PATH"
|
ENV VIRTUAL_ENV="/opt/venv"
|
||||||
|
ENV UV_PYTHON_INSTALL_DIR=/opt/uv/python
|
||||||
|
RUN uv venv --python ${PYTHON_VERSION} --seed ${VIRTUAL_ENV}
|
||||||
|
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
||||||
|
|
||||||
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
||||||
RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
|
RUN uv pip install --no-cache-dir \
|
||||||
python3.10 -m uv pip install --no-cache-dir \
|
|
||||||
torch \
|
torch \
|
||||||
torchvision \
|
torchvision \
|
||||||
torchaudio \
|
torchaudio
|
||||||
invisible_watermark && \
|
|
||||||
python3.10 -m pip install --no-cache-dir \
|
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/diffusers.git@main#egg=diffusers[test]"
|
||||||
|
|
||||||
|
# Extra dependencies
|
||||||
|
RUN uv pip install --no-cache-dir \
|
||||||
accelerate \
|
accelerate \
|
||||||
datasets \
|
|
||||||
hf-doc-builder \
|
|
||||||
huggingface-hub \
|
|
||||||
hf_transfer \
|
|
||||||
Jinja2 \
|
|
||||||
librosa \
|
|
||||||
numpy==1.26.4 \
|
numpy==1.26.4 \
|
||||||
scipy \
|
pytorch-lightning \
|
||||||
tensorboard \
|
hf_xet
|
||||||
transformers \
|
|
||||||
pytorch-lightning \
|
|
||||||
hf_transfer
|
|
||||||
|
|
||||||
CMD ["/bin/bash"]
|
CMD ["/bin/bash"]
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
|
|||||||
LABEL maintainer="Hugging Face"
|
LABEL maintainer="Hugging Face"
|
||||||
LABEL repository="diffusers"
|
LABEL repository="diffusers"
|
||||||
|
|
||||||
|
ARG PYTHON_VERSION=3.10
|
||||||
ENV DEBIAN_FRONTEND=noninteractive
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
ENV MINIMUM_SUPPORTED_TORCH_VERSION="2.1.0"
|
ENV MINIMUM_SUPPORTED_TORCH_VERSION="2.1.0"
|
||||||
ENV MINIMUM_SUPPORTED_TORCHVISION_VERSION="0.16.0"
|
ENV MINIMUM_SUPPORTED_TORCHVISION_VERSION="0.16.0"
|
||||||
@@ -9,7 +10,8 @@ ENV MINIMUM_SUPPORTED_TORCHAUDIO_VERSION="2.1.0"
|
|||||||
|
|
||||||
RUN apt-get -y update \
|
RUN apt-get -y update \
|
||||||
&& apt-get install -y software-properties-common \
|
&& apt-get install -y software-properties-common \
|
||||||
&& add-apt-repository ppa:deadsnakes/ppa
|
&& add-apt-repository ppa:deadsnakes/ppa && \
|
||||||
|
apt-get update
|
||||||
|
|
||||||
RUN apt install -y bash \
|
RUN apt install -y bash \
|
||||||
build-essential \
|
build-essential \
|
||||||
@@ -17,37 +19,34 @@ RUN apt install -y bash \
|
|||||||
git-lfs \
|
git-lfs \
|
||||||
curl \
|
curl \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
|
libglib2.0-0 \
|
||||||
libsndfile1-dev \
|
libsndfile1-dev \
|
||||||
libgl1 \
|
libgl1 \
|
||||||
python3.10 \
|
python3 \
|
||||||
python3.10-dev \
|
|
||||||
python3-pip \
|
python3-pip \
|
||||||
python3.10-venv && \
|
&& apt-get clean \
|
||||||
rm -rf /var/lib/apt/lists
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# make sure to use venv
|
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||||
RUN python3.10 -m venv /opt/venv
|
ENV PATH="/root/.local/bin:$PATH"
|
||||||
ENV PATH="/opt/venv/bin:$PATH"
|
ENV VIRTUAL_ENV="/opt/venv"
|
||||||
|
ENV UV_PYTHON_INSTALL_DIR=/opt/uv/python
|
||||||
|
RUN uv venv --python ${PYTHON_VERSION} --seed ${VIRTUAL_ENV}
|
||||||
|
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
||||||
|
|
||||||
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
||||||
RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
|
RUN uv pip install --no-cache-dir \
|
||||||
python3.10 -m uv pip install --no-cache-dir \
|
|
||||||
torch==$MINIMUM_SUPPORTED_TORCH_VERSION \
|
torch==$MINIMUM_SUPPORTED_TORCH_VERSION \
|
||||||
torchvision==$MINIMUM_SUPPORTED_TORCHVISION_VERSION \
|
torchvision==$MINIMUM_SUPPORTED_TORCHVISION_VERSION \
|
||||||
torchaudio==$MINIMUM_SUPPORTED_TORCHAUDIO_VERSION \
|
torchaudio==$MINIMUM_SUPPORTED_TORCHAUDIO_VERSION
|
||||||
invisible_watermark && \
|
|
||||||
python3.10 -m pip install --no-cache-dir \
|
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/diffusers.git@main#egg=diffusers[test]"
|
||||||
|
|
||||||
|
# Extra dependencies
|
||||||
|
RUN uv pip install --no-cache-dir \
|
||||||
accelerate \
|
accelerate \
|
||||||
datasets \
|
|
||||||
hf-doc-builder \
|
|
||||||
huggingface-hub \
|
|
||||||
hf_transfer \
|
|
||||||
Jinja2 \
|
|
||||||
librosa \
|
|
||||||
numpy==1.26.4 \
|
numpy==1.26.4 \
|
||||||
scipy \
|
pytorch-lightning \
|
||||||
tensorboard \
|
hf_xet
|
||||||
transformers \
|
|
||||||
hf_transfer
|
|
||||||
|
|
||||||
CMD ["/bin/bash"]
|
CMD ["/bin/bash"]
|
||||||
|
|||||||
@@ -2,50 +2,49 @@ FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
|
|||||||
LABEL maintainer="Hugging Face"
|
LABEL maintainer="Hugging Face"
|
||||||
LABEL repository="diffusers"
|
LABEL repository="diffusers"
|
||||||
|
|
||||||
|
ARG PYTHON_VERSION=3.12
|
||||||
ENV DEBIAN_FRONTEND=noninteractive
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
RUN apt-get -y update \
|
RUN apt-get -y update \
|
||||||
&& apt-get install -y software-properties-common \
|
&& apt-get install -y software-properties-common \
|
||||||
&& add-apt-repository ppa:deadsnakes/ppa
|
&& add-apt-repository ppa:deadsnakes/ppa && \
|
||||||
|
apt-get update
|
||||||
|
|
||||||
RUN apt install -y bash \
|
RUN apt install -y bash \
|
||||||
build-essential \
|
build-essential \
|
||||||
git \
|
git \
|
||||||
git-lfs \
|
git-lfs \
|
||||||
curl \
|
curl \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
libsndfile1-dev \
|
libglib2.0-0 \
|
||||||
libgl1 \
|
libsndfile1-dev \
|
||||||
python3.10 \
|
libgl1 \
|
||||||
python3.10-dev \
|
python3 \
|
||||||
python3-pip \
|
python3-pip \
|
||||||
python3.10-venv && \
|
&& apt-get clean \
|
||||||
rm -rf /var/lib/apt/lists
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# make sure to use venv
|
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||||
RUN python3.10 -m venv /opt/venv
|
ENV PATH="/root/.local/bin:$PATH"
|
||||||
ENV PATH="/opt/venv/bin:$PATH"
|
ENV VIRTUAL_ENV="/opt/venv"
|
||||||
|
ENV UV_PYTHON_INSTALL_DIR=/opt/uv/python
|
||||||
|
RUN uv venv --python ${PYTHON_VERSION} --seed ${VIRTUAL_ENV}
|
||||||
|
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
||||||
|
|
||||||
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
||||||
RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
|
RUN uv pip install --no-cache-dir \
|
||||||
python3.10 -m pip install --no-cache-dir \
|
torch \
|
||||||
torch \
|
torchvision \
|
||||||
torchvision \
|
torchaudio
|
||||||
torchaudio \
|
|
||||||
invisible_watermark && \
|
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/diffusers.git@main#egg=diffusers[test]"
|
||||||
python3.10 -m uv pip install --no-cache-dir \
|
|
||||||
accelerate \
|
# Extra dependencies
|
||||||
datasets \
|
RUN uv pip install --no-cache-dir \
|
||||||
hf-doc-builder \
|
accelerate \
|
||||||
huggingface-hub \
|
numpy==1.26.4 \
|
||||||
hf_transfer \
|
pytorch-lightning \
|
||||||
Jinja2 \
|
hf_xet \
|
||||||
librosa \
|
xformers
|
||||||
numpy==1.26.4 \
|
|
||||||
scipy \
|
|
||||||
tensorboard \
|
|
||||||
transformers \
|
|
||||||
xformers \
|
|
||||||
hf_transfer
|
|
||||||
|
|
||||||
CMD ["/bin/bash"]
|
CMD ["/bin/bash"]
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,36 +1,34 @@
|
|||||||
- sections:
|
- sections:
|
||||||
- local: index
|
- local: index
|
||||||
title: 🧨 Diffusers
|
title: Diffusers
|
||||||
- local: quicktour
|
|
||||||
title: Quicktour
|
|
||||||
- local: stable_diffusion
|
|
||||||
title: Effective and efficient diffusion
|
|
||||||
- local: installation
|
- local: installation
|
||||||
title: Installation
|
title: Installation
|
||||||
|
- local: quicktour
|
||||||
|
title: Quickstart
|
||||||
|
- local: stable_diffusion
|
||||||
|
title: Basic performance
|
||||||
title: Get started
|
title: Get started
|
||||||
- sections:
|
- isExpanded: false
|
||||||
- local: tutorials/tutorial_overview
|
sections:
|
||||||
title: Overview
|
- local: using-diffusers/loading
|
||||||
- local: using-diffusers/write_own_pipeline
|
title: DiffusionPipeline
|
||||||
title: Understanding pipelines, models and schedulers
|
|
||||||
- local: tutorials/autopipeline
|
- local: tutorials/autopipeline
|
||||||
title: AutoPipeline
|
title: AutoPipeline
|
||||||
- local: tutorials/basic_training
|
|
||||||
title: Train a diffusion model
|
|
||||||
title: Tutorials
|
|
||||||
- sections:
|
|
||||||
- local: using-diffusers/loading
|
|
||||||
title: Load pipelines
|
|
||||||
- local: using-diffusers/custom_pipeline_overview
|
- local: using-diffusers/custom_pipeline_overview
|
||||||
title: Load community pipelines and components
|
title: Community pipelines and components
|
||||||
|
- local: using-diffusers/callback
|
||||||
|
title: Pipeline callbacks
|
||||||
|
- local: using-diffusers/reusing_seeds
|
||||||
|
title: Reproducibility
|
||||||
- local: using-diffusers/schedulers
|
- local: using-diffusers/schedulers
|
||||||
title: Load schedulers and models
|
title: Schedulers
|
||||||
- local: using-diffusers/other-formats
|
- local: using-diffusers/other-formats
|
||||||
title: Model files and layouts
|
title: Model formats
|
||||||
- local: using-diffusers/push_to_hub
|
- local: using-diffusers/push_to_hub
|
||||||
title: Push files to the Hub
|
title: Sharing pipelines and models
|
||||||
title: Load pipelines and adapters
|
title: Pipelines
|
||||||
- sections:
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
- local: tutorials/using_peft_for_inference
|
- local: tutorials/using_peft_for_inference
|
||||||
title: LoRA
|
title: LoRA
|
||||||
- local: using-diffusers/ip_adapter
|
- local: using-diffusers/ip_adapter
|
||||||
@@ -44,44 +42,52 @@
|
|||||||
- local: using-diffusers/textual_inversion_inference
|
- local: using-diffusers/textual_inversion_inference
|
||||||
title: Textual inversion
|
title: Textual inversion
|
||||||
title: Adapters
|
title: Adapters
|
||||||
isExpanded: false
|
- isExpanded: false
|
||||||
- sections:
|
sections:
|
||||||
- local: using-diffusers/unconditional_image_generation
|
- local: using-diffusers/weighted_prompts
|
||||||
title: Unconditional image generation
|
title: Prompting
|
||||||
- local: using-diffusers/conditional_image_generation
|
|
||||||
title: Text-to-image
|
|
||||||
- local: using-diffusers/img2img
|
|
||||||
title: Image-to-image
|
|
||||||
- local: using-diffusers/inpaint
|
|
||||||
title: Inpainting
|
|
||||||
- local: using-diffusers/text-img2vid
|
|
||||||
title: Video generation
|
|
||||||
- local: using-diffusers/depth2img
|
|
||||||
title: Depth-to-image
|
|
||||||
title: Generative tasks
|
|
||||||
- sections:
|
|
||||||
- local: using-diffusers/overview_techniques
|
|
||||||
title: Overview
|
|
||||||
- local: using-diffusers/create_a_server
|
- local: using-diffusers/create_a_server
|
||||||
title: Create a server
|
title: Create a server
|
||||||
|
- local: using-diffusers/batched_inference
|
||||||
|
title: Batch inference
|
||||||
- local: training/distributed_inference
|
- local: training/distributed_inference
|
||||||
title: Distributed inference
|
title: Distributed inference
|
||||||
- local: using-diffusers/scheduler_features
|
title: Inference
|
||||||
title: Scheduler features
|
- isExpanded: false
|
||||||
- local: using-diffusers/callback
|
sections:
|
||||||
title: Pipeline callbacks
|
- local: optimization/fp16
|
||||||
- local: using-diffusers/reusing_seeds
|
title: Accelerate inference
|
||||||
title: Reproducible pipelines
|
- local: optimization/cache
|
||||||
- local: using-diffusers/image_quality
|
title: Caching
|
||||||
title: Controlling image quality
|
- local: optimization/attention_backends
|
||||||
- local: using-diffusers/weighted_prompts
|
title: Attention backends
|
||||||
title: Prompt techniques
|
- local: optimization/memory
|
||||||
title: Inference techniques
|
title: Reduce memory usage
|
||||||
- sections:
|
- local: optimization/speed-memory-optims
|
||||||
- local: advanced_inference/outpaint
|
title: Compiling and offloading quantized models
|
||||||
title: Outpainting
|
- sections:
|
||||||
title: Advanced inference
|
- local: optimization/pruna
|
||||||
- sections:
|
title: Pruna
|
||||||
|
- local: optimization/xformers
|
||||||
|
title: xFormers
|
||||||
|
- local: optimization/tome
|
||||||
|
title: Token merging
|
||||||
|
- local: optimization/deepcache
|
||||||
|
title: DeepCache
|
||||||
|
- local: optimization/cache_dit
|
||||||
|
title: CacheDiT
|
||||||
|
- local: optimization/tgate
|
||||||
|
title: TGATE
|
||||||
|
- local: optimization/xdit
|
||||||
|
title: xDiT
|
||||||
|
- local: optimization/para_attn
|
||||||
|
title: ParaAttention
|
||||||
|
- local: using-diffusers/image_quality
|
||||||
|
title: FreeU
|
||||||
|
title: Community optimizations
|
||||||
|
title: Inference optimization
|
||||||
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
- local: hybrid_inference/overview
|
- local: hybrid_inference/overview
|
||||||
title: Overview
|
title: Overview
|
||||||
- local: hybrid_inference/vae_decode
|
- local: hybrid_inference/vae_decode
|
||||||
@@ -91,9 +97,106 @@
|
|||||||
- local: hybrid_inference/api_reference
|
- local: hybrid_inference/api_reference
|
||||||
title: API Reference
|
title: API Reference
|
||||||
title: Hybrid Inference
|
title: Hybrid Inference
|
||||||
- sections:
|
- isExpanded: false
|
||||||
- local: using-diffusers/cogvideox
|
sections:
|
||||||
title: CogVideoX
|
- local: modular_diffusers/overview
|
||||||
|
title: Overview
|
||||||
|
- local: modular_diffusers/quickstart
|
||||||
|
title: Quickstart
|
||||||
|
- local: modular_diffusers/modular_diffusers_states
|
||||||
|
title: States
|
||||||
|
- local: modular_diffusers/pipeline_block
|
||||||
|
title: ModularPipelineBlocks
|
||||||
|
- local: modular_diffusers/sequential_pipeline_blocks
|
||||||
|
title: SequentialPipelineBlocks
|
||||||
|
- local: modular_diffusers/loop_sequential_pipeline_blocks
|
||||||
|
title: LoopSequentialPipelineBlocks
|
||||||
|
- local: modular_diffusers/auto_pipeline_blocks
|
||||||
|
title: AutoPipelineBlocks
|
||||||
|
- local: modular_diffusers/modular_pipeline
|
||||||
|
title: ModularPipeline
|
||||||
|
- local: modular_diffusers/components_manager
|
||||||
|
title: ComponentsManager
|
||||||
|
- local: modular_diffusers/guiders
|
||||||
|
title: Guiders
|
||||||
|
title: Modular Diffusers
|
||||||
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
|
- local: training/overview
|
||||||
|
title: Overview
|
||||||
|
- local: training/create_dataset
|
||||||
|
title: Create a dataset for training
|
||||||
|
- local: training/adapt_a_model
|
||||||
|
title: Adapt a model to a new task
|
||||||
|
- local: tutorials/basic_training
|
||||||
|
title: Train a diffusion model
|
||||||
|
- sections:
|
||||||
|
- local: training/unconditional_training
|
||||||
|
title: Unconditional image generation
|
||||||
|
- local: training/text2image
|
||||||
|
title: Text-to-image
|
||||||
|
- local: training/sdxl
|
||||||
|
title: Stable Diffusion XL
|
||||||
|
- local: training/kandinsky
|
||||||
|
title: Kandinsky 2.2
|
||||||
|
- local: training/wuerstchen
|
||||||
|
title: Wuerstchen
|
||||||
|
- local: training/controlnet
|
||||||
|
title: ControlNet
|
||||||
|
- local: training/t2i_adapters
|
||||||
|
title: T2I-Adapters
|
||||||
|
- local: training/instructpix2pix
|
||||||
|
title: InstructPix2Pix
|
||||||
|
- local: training/cogvideox
|
||||||
|
title: CogVideoX
|
||||||
|
title: Models
|
||||||
|
- sections:
|
||||||
|
- local: training/text_inversion
|
||||||
|
title: Textual Inversion
|
||||||
|
- local: training/dreambooth
|
||||||
|
title: DreamBooth
|
||||||
|
- local: training/lora
|
||||||
|
title: LoRA
|
||||||
|
- local: training/custom_diffusion
|
||||||
|
title: Custom Diffusion
|
||||||
|
- local: training/lcm_distill
|
||||||
|
title: Latent Consistency Distillation
|
||||||
|
- local: training/ddpo
|
||||||
|
title: Reinforcement learning training with DDPO
|
||||||
|
title: Methods
|
||||||
|
title: Training
|
||||||
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
|
- local: quantization/overview
|
||||||
|
title: Getting started
|
||||||
|
- local: quantization/bitsandbytes
|
||||||
|
title: bitsandbytes
|
||||||
|
- local: quantization/gguf
|
||||||
|
title: gguf
|
||||||
|
- local: quantization/torchao
|
||||||
|
title: torchao
|
||||||
|
- local: quantization/quanto
|
||||||
|
title: quanto
|
||||||
|
- local: quantization/modelopt
|
||||||
|
title: NVIDIA ModelOpt
|
||||||
|
title: Quantization
|
||||||
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
|
- local: optimization/onnx
|
||||||
|
title: ONNX
|
||||||
|
- local: optimization/open_vino
|
||||||
|
title: OpenVINO
|
||||||
|
- local: optimization/coreml
|
||||||
|
title: Core ML
|
||||||
|
- local: optimization/mps
|
||||||
|
title: Metal Performance Shaders (MPS)
|
||||||
|
- local: optimization/habana
|
||||||
|
title: Intel Gaudi
|
||||||
|
- local: optimization/neuron
|
||||||
|
title: AWS Neuron
|
||||||
|
title: Model accelerators and hardware
|
||||||
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
- local: using-diffusers/consisid
|
- local: using-diffusers/consisid
|
||||||
title: ConsisID
|
title: ConsisID
|
||||||
- local: using-diffusers/sdxl
|
- local: using-diffusers/sdxl
|
||||||
@@ -119,101 +222,28 @@
|
|||||||
- local: using-diffusers/marigold_usage
|
- local: using-diffusers/marigold_usage
|
||||||
title: Marigold Computer Vision
|
title: Marigold Computer Vision
|
||||||
title: Specific pipeline examples
|
title: Specific pipeline examples
|
||||||
- sections:
|
- isExpanded: false
|
||||||
- local: training/overview
|
sections:
|
||||||
title: Overview
|
- sections:
|
||||||
- local: training/create_dataset
|
- local: using-diffusers/unconditional_image_generation
|
||||||
title: Create a dataset for training
|
|
||||||
- local: training/adapt_a_model
|
|
||||||
title: Adapt a model to a new task
|
|
||||||
- isExpanded: false
|
|
||||||
sections:
|
|
||||||
- local: training/unconditional_training
|
|
||||||
title: Unconditional image generation
|
title: Unconditional image generation
|
||||||
- local: training/text2image
|
- local: using-diffusers/conditional_image_generation
|
||||||
title: Text-to-image
|
title: Text-to-image
|
||||||
- local: training/sdxl
|
- local: using-diffusers/img2img
|
||||||
title: Stable Diffusion XL
|
title: Image-to-image
|
||||||
- local: training/kandinsky
|
- local: using-diffusers/inpaint
|
||||||
title: Kandinsky 2.2
|
title: Inpainting
|
||||||
- local: training/wuerstchen
|
- local: advanced_inference/outpaint
|
||||||
title: Wuerstchen
|
title: Outpainting
|
||||||
- local: training/controlnet
|
- local: using-diffusers/text-img2vid
|
||||||
title: ControlNet
|
title: Video generation
|
||||||
- local: training/t2i_adapters
|
- local: using-diffusers/depth2img
|
||||||
title: T2I-Adapters
|
title: Depth-to-image
|
||||||
- local: training/instructpix2pix
|
title: Task recipes
|
||||||
title: InstructPix2Pix
|
- local: using-diffusers/write_own_pipeline
|
||||||
- local: training/cogvideox
|
title: Understanding pipelines, models and schedulers
|
||||||
title: CogVideoX
|
- local: community_projects
|
||||||
title: Models
|
title: Projects built with Diffusers
|
||||||
- isExpanded: false
|
|
||||||
sections:
|
|
||||||
- local: training/text_inversion
|
|
||||||
title: Textual Inversion
|
|
||||||
- local: training/dreambooth
|
|
||||||
title: DreamBooth
|
|
||||||
- local: training/lora
|
|
||||||
title: LoRA
|
|
||||||
- local: training/custom_diffusion
|
|
||||||
title: Custom Diffusion
|
|
||||||
- local: training/lcm_distill
|
|
||||||
title: Latent Consistency Distillation
|
|
||||||
- local: training/ddpo
|
|
||||||
title: Reinforcement learning training with DDPO
|
|
||||||
title: Methods
|
|
||||||
title: Training
|
|
||||||
- sections:
|
|
||||||
- local: quantization/overview
|
|
||||||
title: Getting Started
|
|
||||||
- local: quantization/bitsandbytes
|
|
||||||
title: bitsandbytes
|
|
||||||
- local: quantization/gguf
|
|
||||||
title: gguf
|
|
||||||
- local: quantization/torchao
|
|
||||||
title: torchao
|
|
||||||
- local: quantization/quanto
|
|
||||||
title: quanto
|
|
||||||
title: Quantization Methods
|
|
||||||
- sections:
|
|
||||||
- local: optimization/fp16
|
|
||||||
title: Accelerate inference
|
|
||||||
- local: optimization/memory
|
|
||||||
title: Reduce memory usage
|
|
||||||
- local: optimization/torch2.0
|
|
||||||
title: PyTorch 2.0
|
|
||||||
- local: optimization/xformers
|
|
||||||
title: xFormers
|
|
||||||
- local: optimization/tome
|
|
||||||
title: Token merging
|
|
||||||
- local: optimization/deepcache
|
|
||||||
title: DeepCache
|
|
||||||
- local: optimization/tgate
|
|
||||||
title: TGATE
|
|
||||||
- local: optimization/xdit
|
|
||||||
title: xDiT
|
|
||||||
- local: optimization/para_attn
|
|
||||||
title: ParaAttention
|
|
||||||
- sections:
|
|
||||||
- local: using-diffusers/stable_diffusion_jax_how_to
|
|
||||||
title: JAX/Flax
|
|
||||||
- local: optimization/onnx
|
|
||||||
title: ONNX
|
|
||||||
- local: optimization/open_vino
|
|
||||||
title: OpenVINO
|
|
||||||
- local: optimization/coreml
|
|
||||||
title: Core ML
|
|
||||||
title: Optimized model formats
|
|
||||||
- sections:
|
|
||||||
- local: optimization/mps
|
|
||||||
title: Metal Performance Shaders (MPS)
|
|
||||||
- local: optimization/habana
|
|
||||||
title: Habana Gaudi
|
|
||||||
- local: optimization/neuron
|
|
||||||
title: AWS Neuron
|
|
||||||
title: Optimized hardware
|
|
||||||
title: Accelerate inference and reduce memory
|
|
||||||
- sections:
|
|
||||||
- local: conceptual/philosophy
|
- local: conceptual/philosophy
|
||||||
title: Philosophy
|
title: Philosophy
|
||||||
- local: using-diffusers/controlling_generation
|
- local: using-diffusers/controlling_generation
|
||||||
@@ -224,14 +254,10 @@
|
|||||||
title: Diffusers' Ethical Guidelines
|
title: Diffusers' Ethical Guidelines
|
||||||
- local: conceptual/evaluation
|
- local: conceptual/evaluation
|
||||||
title: Evaluating Diffusion Models
|
title: Evaluating Diffusion Models
|
||||||
title: Conceptual Guides
|
title: Resources
|
||||||
- sections:
|
- isExpanded: false
|
||||||
- local: community_projects
|
sections:
|
||||||
title: Projects built with Diffusers
|
- sections:
|
||||||
title: Community Projects
|
|
||||||
- sections:
|
|
||||||
- isExpanded: false
|
|
||||||
sections:
|
|
||||||
- local: api/configuration
|
- local: api/configuration
|
||||||
title: Configuration
|
title: Configuration
|
||||||
- local: api/logging
|
- local: api/logging
|
||||||
@@ -240,9 +266,22 @@
|
|||||||
title: Outputs
|
title: Outputs
|
||||||
- local: api/quantization
|
- local: api/quantization
|
||||||
title: Quantization
|
title: Quantization
|
||||||
|
- local: api/parallel
|
||||||
|
title: Parallel inference
|
||||||
title: Main Classes
|
title: Main Classes
|
||||||
- isExpanded: false
|
- sections:
|
||||||
sections:
|
- local: api/modular_diffusers/pipeline
|
||||||
|
title: Pipeline
|
||||||
|
- local: api/modular_diffusers/pipeline_blocks
|
||||||
|
title: Blocks
|
||||||
|
- local: api/modular_diffusers/pipeline_states
|
||||||
|
title: States
|
||||||
|
- local: api/modular_diffusers/pipeline_components
|
||||||
|
title: Components and configs
|
||||||
|
- local: api/modular_diffusers/guiders
|
||||||
|
title: Guiders
|
||||||
|
title: Modular
|
||||||
|
- sections:
|
||||||
- local: api/loaders/ip_adapter
|
- local: api/loaders/ip_adapter
|
||||||
title: IP-Adapter
|
title: IP-Adapter
|
||||||
- local: api/loaders/lora
|
- local: api/loaders/lora
|
||||||
@@ -258,8 +297,7 @@
|
|||||||
- local: api/loaders/peft
|
- local: api/loaders/peft
|
||||||
title: PEFT
|
title: PEFT
|
||||||
title: Loaders
|
title: Loaders
|
||||||
- isExpanded: false
|
- sections:
|
||||||
sections:
|
|
||||||
- local: api/models/overview
|
- local: api/models/overview
|
||||||
title: Overview
|
title: Overview
|
||||||
- local: api/models/auto_model
|
- local: api/models/auto_model
|
||||||
@@ -285,6 +323,12 @@
|
|||||||
title: AllegroTransformer3DModel
|
title: AllegroTransformer3DModel
|
||||||
- local: api/models/aura_flow_transformer2d
|
- local: api/models/aura_flow_transformer2d
|
||||||
title: AuraFlowTransformer2DModel
|
title: AuraFlowTransformer2DModel
|
||||||
|
- local: api/models/transformer_bria_fibo
|
||||||
|
title: BriaFiboTransformer2DModel
|
||||||
|
- local: api/models/bria_transformer
|
||||||
|
title: BriaTransformer2DModel
|
||||||
|
- local: api/models/chroma_transformer
|
||||||
|
title: ChromaTransformer2DModel
|
||||||
- local: api/models/cogvideox_transformer3d
|
- local: api/models/cogvideox_transformer3d
|
||||||
title: CogVideoXTransformer3DModel
|
title: CogVideoXTransformer3DModel
|
||||||
- local: api/models/cogview3plus_transformer2d
|
- local: api/models/cogview3plus_transformer2d
|
||||||
@@ -305,6 +349,8 @@
|
|||||||
title: HiDreamImageTransformer2DModel
|
title: HiDreamImageTransformer2DModel
|
||||||
- local: api/models/hunyuan_transformer2d
|
- local: api/models/hunyuan_transformer2d
|
||||||
title: HunyuanDiT2DModel
|
title: HunyuanDiT2DModel
|
||||||
|
- local: api/models/hunyuanimage_transformer_2d
|
||||||
|
title: HunyuanImageTransformer2DModel
|
||||||
- local: api/models/hunyuan_video_transformer_3d
|
- local: api/models/hunyuan_video_transformer_3d
|
||||||
title: HunyuanVideoTransformer3DModel
|
title: HunyuanVideoTransformer3DModel
|
||||||
- local: api/models/latte_transformer3d
|
- local: api/models/latte_transformer3d
|
||||||
@@ -323,10 +369,14 @@
|
|||||||
title: PixArtTransformer2DModel
|
title: PixArtTransformer2DModel
|
||||||
- local: api/models/prior_transformer
|
- local: api/models/prior_transformer
|
||||||
title: PriorTransformer
|
title: PriorTransformer
|
||||||
|
- local: api/models/qwenimage_transformer2d
|
||||||
|
title: QwenImageTransformer2DModel
|
||||||
- local: api/models/sana_transformer2d
|
- local: api/models/sana_transformer2d
|
||||||
title: SanaTransformer2DModel
|
title: SanaTransformer2DModel
|
||||||
- local: api/models/sd3_transformer2d
|
- local: api/models/sd3_transformer2d
|
||||||
title: SD3Transformer2DModel
|
title: SD3Transformer2DModel
|
||||||
|
- local: api/models/skyreels_v2_transformer_3d
|
||||||
|
title: SkyReelsV2Transformer3DModel
|
||||||
- local: api/models/stable_audio_transformer
|
- local: api/models/stable_audio_transformer
|
||||||
title: StableAudioDiTModel
|
title: StableAudioDiTModel
|
||||||
- local: api/models/transformer2d
|
- local: api/models/transformer2d
|
||||||
@@ -365,6 +415,10 @@
|
|||||||
title: AutoencoderKLCogVideoX
|
title: AutoencoderKLCogVideoX
|
||||||
- local: api/models/autoencoderkl_cosmos
|
- local: api/models/autoencoderkl_cosmos
|
||||||
title: AutoencoderKLCosmos
|
title: AutoencoderKLCosmos
|
||||||
|
- local: api/models/autoencoder_kl_hunyuanimage
|
||||||
|
title: AutoencoderKLHunyuanImage
|
||||||
|
- local: api/models/autoencoder_kl_hunyuanimage_refiner
|
||||||
|
title: AutoencoderKLHunyuanImageRefiner
|
||||||
- local: api/models/autoencoder_kl_hunyuan_video
|
- local: api/models/autoencoder_kl_hunyuan_video
|
||||||
title: AutoencoderKLHunyuanVideo
|
title: AutoencoderKLHunyuanVideo
|
||||||
- local: api/models/autoencoderkl_ltx_video
|
- local: api/models/autoencoderkl_ltx_video
|
||||||
@@ -373,6 +427,8 @@
|
|||||||
title: AutoencoderKLMagvit
|
title: AutoencoderKLMagvit
|
||||||
- local: api/models/autoencoderkl_mochi
|
- local: api/models/autoencoderkl_mochi
|
||||||
title: AutoencoderKLMochi
|
title: AutoencoderKLMochi
|
||||||
|
- local: api/models/autoencoderkl_qwenimage
|
||||||
|
title: AutoencoderKLQwenImage
|
||||||
- local: api/models/autoencoder_kl_wan
|
- local: api/models/autoencoder_kl_wan
|
||||||
title: AutoencoderKLWan
|
title: AutoencoderKLWan
|
||||||
- local: api/models/consistency_decoder_vae
|
- local: api/models/consistency_decoder_vae
|
||||||
@@ -385,203 +441,224 @@
|
|||||||
title: VQModel
|
title: VQModel
|
||||||
title: VAEs
|
title: VAEs
|
||||||
title: Models
|
title: Models
|
||||||
- isExpanded: false
|
- sections:
|
||||||
sections:
|
|
||||||
- local: api/pipelines/overview
|
- local: api/pipelines/overview
|
||||||
title: Overview
|
title: Overview
|
||||||
- local: api/pipelines/allegro
|
- sections:
|
||||||
title: Allegro
|
- local: api/pipelines/audioldm
|
||||||
- local: api/pipelines/amused
|
title: AudioLDM
|
||||||
title: aMUSEd
|
- local: api/pipelines/audioldm2
|
||||||
- local: api/pipelines/animatediff
|
title: AudioLDM 2
|
||||||
title: AnimateDiff
|
- local: api/pipelines/dance_diffusion
|
||||||
- local: api/pipelines/attend_and_excite
|
title: Dance Diffusion
|
||||||
title: Attend-and-Excite
|
- local: api/pipelines/musicldm
|
||||||
- local: api/pipelines/audioldm
|
title: MusicLDM
|
||||||
title: AudioLDM
|
- local: api/pipelines/stable_audio
|
||||||
- local: api/pipelines/audioldm2
|
title: Stable Audio
|
||||||
title: AudioLDM 2
|
title: Audio
|
||||||
- local: api/pipelines/aura_flow
|
|
||||||
title: AuraFlow
|
|
||||||
- local: api/pipelines/auto_pipeline
|
- local: api/pipelines/auto_pipeline
|
||||||
title: AutoPipeline
|
title: AutoPipeline
|
||||||
- local: api/pipelines/blip_diffusion
|
|
||||||
title: BLIP-Diffusion
|
|
||||||
- local: api/pipelines/cogvideox
|
|
||||||
title: CogVideoX
|
|
||||||
- local: api/pipelines/cogview3
|
|
||||||
title: CogView3
|
|
||||||
- local: api/pipelines/cogview4
|
|
||||||
title: CogView4
|
|
||||||
- local: api/pipelines/consisid
|
|
||||||
title: ConsisID
|
|
||||||
- local: api/pipelines/consistency_models
|
|
||||||
title: Consistency Models
|
|
||||||
- local: api/pipelines/controlnet
|
|
||||||
title: ControlNet
|
|
||||||
- local: api/pipelines/controlnet_flux
|
|
||||||
title: ControlNet with Flux.1
|
|
||||||
- local: api/pipelines/controlnet_hunyuandit
|
|
||||||
title: ControlNet with Hunyuan-DiT
|
|
||||||
- local: api/pipelines/controlnet_sd3
|
|
||||||
title: ControlNet with Stable Diffusion 3
|
|
||||||
- local: api/pipelines/controlnet_sdxl
|
|
||||||
title: ControlNet with Stable Diffusion XL
|
|
||||||
- local: api/pipelines/controlnet_sana
|
|
||||||
title: ControlNet-Sana
|
|
||||||
- local: api/pipelines/controlnetxs
|
|
||||||
title: ControlNet-XS
|
|
||||||
- local: api/pipelines/controlnetxs_sdxl
|
|
||||||
title: ControlNet-XS with Stable Diffusion XL
|
|
||||||
- local: api/pipelines/controlnet_union
|
|
||||||
title: ControlNetUnion
|
|
||||||
- local: api/pipelines/cosmos
|
|
||||||
title: Cosmos
|
|
||||||
- local: api/pipelines/dance_diffusion
|
|
||||||
title: Dance Diffusion
|
|
||||||
- local: api/pipelines/ddim
|
|
||||||
title: DDIM
|
|
||||||
- local: api/pipelines/ddpm
|
|
||||||
title: DDPM
|
|
||||||
- local: api/pipelines/deepfloyd_if
|
|
||||||
title: DeepFloyd IF
|
|
||||||
- local: api/pipelines/diffedit
|
|
||||||
title: DiffEdit
|
|
||||||
- local: api/pipelines/dit
|
|
||||||
title: DiT
|
|
||||||
- local: api/pipelines/easyanimate
|
|
||||||
title: EasyAnimate
|
|
||||||
- local: api/pipelines/flux
|
|
||||||
title: Flux
|
|
||||||
- local: api/pipelines/control_flux_inpaint
|
|
||||||
title: FluxControlInpaint
|
|
||||||
- local: api/pipelines/framepack
|
|
||||||
title: Framepack
|
|
||||||
- local: api/pipelines/hidream
|
|
||||||
title: HiDream-I1
|
|
||||||
- local: api/pipelines/hunyuandit
|
|
||||||
title: Hunyuan-DiT
|
|
||||||
- local: api/pipelines/hunyuan_video
|
|
||||||
title: HunyuanVideo
|
|
||||||
- local: api/pipelines/i2vgenxl
|
|
||||||
title: I2VGen-XL
|
|
||||||
- local: api/pipelines/pix2pix
|
|
||||||
title: InstructPix2Pix
|
|
||||||
- local: api/pipelines/kandinsky
|
|
||||||
title: Kandinsky 2.1
|
|
||||||
- local: api/pipelines/kandinsky_v22
|
|
||||||
title: Kandinsky 2.2
|
|
||||||
- local: api/pipelines/kandinsky3
|
|
||||||
title: Kandinsky 3
|
|
||||||
- local: api/pipelines/kolors
|
|
||||||
title: Kolors
|
|
||||||
- local: api/pipelines/latent_consistency_models
|
|
||||||
title: Latent Consistency Models
|
|
||||||
- local: api/pipelines/latent_diffusion
|
|
||||||
title: Latent Diffusion
|
|
||||||
- local: api/pipelines/latte
|
|
||||||
title: Latte
|
|
||||||
- local: api/pipelines/ledits_pp
|
|
||||||
title: LEDITS++
|
|
||||||
- local: api/pipelines/ltx_video
|
|
||||||
title: LTXVideo
|
|
||||||
- local: api/pipelines/lumina2
|
|
||||||
title: Lumina 2.0
|
|
||||||
- local: api/pipelines/lumina
|
|
||||||
title: Lumina-T2X
|
|
||||||
- local: api/pipelines/marigold
|
|
||||||
title: Marigold
|
|
||||||
- local: api/pipelines/mochi
|
|
||||||
title: Mochi
|
|
||||||
- local: api/pipelines/panorama
|
|
||||||
title: MultiDiffusion
|
|
||||||
- local: api/pipelines/musicldm
|
|
||||||
title: MusicLDM
|
|
||||||
- local: api/pipelines/omnigen
|
|
||||||
title: OmniGen
|
|
||||||
- local: api/pipelines/pag
|
|
||||||
title: PAG
|
|
||||||
- local: api/pipelines/paint_by_example
|
|
||||||
title: Paint by Example
|
|
||||||
- local: api/pipelines/pia
|
|
||||||
title: Personalized Image Animator (PIA)
|
|
||||||
- local: api/pipelines/pixart
|
|
||||||
title: PixArt-α
|
|
||||||
- local: api/pipelines/pixart_sigma
|
|
||||||
title: PixArt-Σ
|
|
||||||
- local: api/pipelines/sana
|
|
||||||
title: Sana
|
|
||||||
- local: api/pipelines/sana_sprint
|
|
||||||
title: Sana Sprint
|
|
||||||
- local: api/pipelines/self_attention_guidance
|
|
||||||
title: Self-Attention Guidance
|
|
||||||
- local: api/pipelines/semantic_stable_diffusion
|
|
||||||
title: Semantic Guidance
|
|
||||||
- local: api/pipelines/shap_e
|
|
||||||
title: Shap-E
|
|
||||||
- local: api/pipelines/stable_audio
|
|
||||||
title: Stable Audio
|
|
||||||
- local: api/pipelines/stable_cascade
|
|
||||||
title: Stable Cascade
|
|
||||||
- sections:
|
- sections:
|
||||||
- local: api/pipelines/stable_diffusion/overview
|
- local: api/pipelines/amused
|
||||||
title: Overview
|
title: aMUSEd
|
||||||
- local: api/pipelines/stable_diffusion/depth2img
|
- local: api/pipelines/animatediff
|
||||||
title: Depth-to-image
|
title: AnimateDiff
|
||||||
- local: api/pipelines/stable_diffusion/gligen
|
- local: api/pipelines/attend_and_excite
|
||||||
title: GLIGEN (Grounded Language-to-Image Generation)
|
title: Attend-and-Excite
|
||||||
- local: api/pipelines/stable_diffusion/image_variation
|
- local: api/pipelines/aura_flow
|
||||||
title: Image variation
|
title: AuraFlow
|
||||||
- local: api/pipelines/stable_diffusion/img2img
|
- local: api/pipelines/blip_diffusion
|
||||||
title: Image-to-image
|
title: BLIP-Diffusion
|
||||||
|
- local: api/pipelines/bria_3_2
|
||||||
|
title: Bria 3.2
|
||||||
|
- local: api/pipelines/bria_fibo
|
||||||
|
title: Bria Fibo
|
||||||
|
- local: api/pipelines/chroma
|
||||||
|
title: Chroma
|
||||||
|
- local: api/pipelines/cogview3
|
||||||
|
title: CogView3
|
||||||
|
- local: api/pipelines/cogview4
|
||||||
|
title: CogView4
|
||||||
|
- local: api/pipelines/consistency_models
|
||||||
|
title: Consistency Models
|
||||||
|
- local: api/pipelines/controlnet
|
||||||
|
title: ControlNet
|
||||||
|
- local: api/pipelines/controlnet_flux
|
||||||
|
title: ControlNet with Flux.1
|
||||||
|
- local: api/pipelines/controlnet_hunyuandit
|
||||||
|
title: ControlNet with Hunyuan-DiT
|
||||||
|
- local: api/pipelines/controlnet_sd3
|
||||||
|
title: ControlNet with Stable Diffusion 3
|
||||||
|
- local: api/pipelines/controlnet_sdxl
|
||||||
|
title: ControlNet with Stable Diffusion XL
|
||||||
|
- local: api/pipelines/controlnet_sana
|
||||||
|
title: ControlNet-Sana
|
||||||
|
- local: api/pipelines/controlnetxs
|
||||||
|
title: ControlNet-XS
|
||||||
|
- local: api/pipelines/controlnetxs_sdxl
|
||||||
|
title: ControlNet-XS with Stable Diffusion XL
|
||||||
|
- local: api/pipelines/controlnet_union
|
||||||
|
title: ControlNetUnion
|
||||||
|
- local: api/pipelines/cosmos
|
||||||
|
title: Cosmos
|
||||||
|
- local: api/pipelines/ddim
|
||||||
|
title: DDIM
|
||||||
|
- local: api/pipelines/ddpm
|
||||||
|
title: DDPM
|
||||||
|
- local: api/pipelines/deepfloyd_if
|
||||||
|
title: DeepFloyd IF
|
||||||
|
- local: api/pipelines/diffedit
|
||||||
|
title: DiffEdit
|
||||||
|
- local: api/pipelines/dit
|
||||||
|
title: DiT
|
||||||
|
- local: api/pipelines/easyanimate
|
||||||
|
title: EasyAnimate
|
||||||
|
- local: api/pipelines/flux
|
||||||
|
title: Flux
|
||||||
|
- local: api/pipelines/control_flux_inpaint
|
||||||
|
title: FluxControlInpaint
|
||||||
|
- local: api/pipelines/hidream
|
||||||
|
title: HiDream-I1
|
||||||
|
- local: api/pipelines/hunyuandit
|
||||||
|
title: Hunyuan-DiT
|
||||||
|
- local: api/pipelines/pix2pix
|
||||||
|
title: InstructPix2Pix
|
||||||
|
- local: api/pipelines/kandinsky
|
||||||
|
title: Kandinsky 2.1
|
||||||
|
- local: api/pipelines/kandinsky_v22
|
||||||
|
title: Kandinsky 2.2
|
||||||
|
- local: api/pipelines/kandinsky3
|
||||||
|
title: Kandinsky 3
|
||||||
|
- local: api/pipelines/kandinsky5
|
||||||
|
title: Kandinsky 5
|
||||||
|
- local: api/pipelines/kolors
|
||||||
|
title: Kolors
|
||||||
|
- local: api/pipelines/latent_consistency_models
|
||||||
|
title: Latent Consistency Models
|
||||||
|
- local: api/pipelines/latent_diffusion
|
||||||
|
title: Latent Diffusion
|
||||||
|
- local: api/pipelines/ledits_pp
|
||||||
|
title: LEDITS++
|
||||||
|
- local: api/pipelines/lumina2
|
||||||
|
title: Lumina 2.0
|
||||||
|
- local: api/pipelines/lumina
|
||||||
|
title: Lumina-T2X
|
||||||
|
- local: api/pipelines/marigold
|
||||||
|
title: Marigold
|
||||||
|
- local: api/pipelines/panorama
|
||||||
|
title: MultiDiffusion
|
||||||
|
- local: api/pipelines/omnigen
|
||||||
|
title: OmniGen
|
||||||
|
- local: api/pipelines/pag
|
||||||
|
title: PAG
|
||||||
|
- local: api/pipelines/paint_by_example
|
||||||
|
title: Paint by Example
|
||||||
|
- local: api/pipelines/pixart
|
||||||
|
title: PixArt-α
|
||||||
|
- local: api/pipelines/pixart_sigma
|
||||||
|
title: PixArt-Σ
|
||||||
|
- local: api/pipelines/prx
|
||||||
|
title: PRX
|
||||||
|
- local: api/pipelines/qwenimage
|
||||||
|
title: QwenImage
|
||||||
|
- local: api/pipelines/sana
|
||||||
|
title: Sana
|
||||||
|
- local: api/pipelines/sana_sprint
|
||||||
|
title: Sana Sprint
|
||||||
|
- local: api/pipelines/self_attention_guidance
|
||||||
|
title: Self-Attention Guidance
|
||||||
|
- local: api/pipelines/semantic_stable_diffusion
|
||||||
|
title: Semantic Guidance
|
||||||
|
- local: api/pipelines/shap_e
|
||||||
|
title: Shap-E
|
||||||
|
- local: api/pipelines/stable_cascade
|
||||||
|
title: Stable Cascade
|
||||||
|
- sections:
|
||||||
|
- local: api/pipelines/stable_diffusion/overview
|
||||||
|
title: Overview
|
||||||
|
- local: api/pipelines/stable_diffusion/depth2img
|
||||||
|
title: Depth-to-image
|
||||||
|
- local: api/pipelines/stable_diffusion/gligen
|
||||||
|
title: GLIGEN (Grounded Language-to-Image Generation)
|
||||||
|
- local: api/pipelines/stable_diffusion/image_variation
|
||||||
|
title: Image variation
|
||||||
|
- local: api/pipelines/stable_diffusion/img2img
|
||||||
|
title: Image-to-image
|
||||||
|
- local: api/pipelines/stable_diffusion/inpaint
|
||||||
|
title: Inpainting
|
||||||
|
- local: api/pipelines/stable_diffusion/k_diffusion
|
||||||
|
title: K-Diffusion
|
||||||
|
- local: api/pipelines/stable_diffusion/latent_upscale
|
||||||
|
title: Latent upscaler
|
||||||
|
- local: api/pipelines/stable_diffusion/ldm3d_diffusion
|
||||||
|
title: LDM3D Text-to-(RGB, Depth), Text-to-(RGB-pano, Depth-pano), LDM3D
|
||||||
|
Upscaler
|
||||||
|
- local: api/pipelines/stable_diffusion/stable_diffusion_safe
|
||||||
|
title: Safe Stable Diffusion
|
||||||
|
- local: api/pipelines/stable_diffusion/sdxl_turbo
|
||||||
|
title: SDXL Turbo
|
||||||
|
- local: api/pipelines/stable_diffusion/stable_diffusion_2
|
||||||
|
title: Stable Diffusion 2
|
||||||
|
- local: api/pipelines/stable_diffusion/stable_diffusion_3
|
||||||
|
title: Stable Diffusion 3
|
||||||
|
- local: api/pipelines/stable_diffusion/stable_diffusion_xl
|
||||||
|
title: Stable Diffusion XL
|
||||||
|
- local: api/pipelines/stable_diffusion/upscale
|
||||||
|
title: Super-resolution
|
||||||
|
- local: api/pipelines/stable_diffusion/adapter
|
||||||
|
title: T2I-Adapter
|
||||||
|
- local: api/pipelines/stable_diffusion/text2img
|
||||||
|
title: Text-to-image
|
||||||
|
title: Stable Diffusion
|
||||||
|
- local: api/pipelines/stable_unclip
|
||||||
|
title: Stable unCLIP
|
||||||
|
- local: api/pipelines/unclip
|
||||||
|
title: unCLIP
|
||||||
|
- local: api/pipelines/unidiffuser
|
||||||
|
title: UniDiffuser
|
||||||
|
- local: api/pipelines/value_guided_sampling
|
||||||
|
title: Value-guided sampling
|
||||||
|
- local: api/pipelines/visualcloze
|
||||||
|
title: VisualCloze
|
||||||
|
- local: api/pipelines/wuerstchen
|
||||||
|
title: Wuerstchen
|
||||||
|
title: Image
|
||||||
|
- sections:
|
||||||
|
- local: api/pipelines/allegro
|
||||||
|
title: Allegro
|
||||||
|
- local: api/pipelines/cogvideox
|
||||||
|
title: CogVideoX
|
||||||
|
- local: api/pipelines/consisid
|
||||||
|
title: ConsisID
|
||||||
|
- local: api/pipelines/framepack
|
||||||
|
title: Framepack
|
||||||
|
- local: api/pipelines/hunyuanimage21
|
||||||
|
title: HunyuanImage2.1
|
||||||
|
- local: api/pipelines/hunyuan_video
|
||||||
|
title: HunyuanVideo
|
||||||
|
- local: api/pipelines/i2vgenxl
|
||||||
|
title: I2VGen-XL
|
||||||
|
- local: api/pipelines/latte
|
||||||
|
title: Latte
|
||||||
|
- local: api/pipelines/ltx_video
|
||||||
|
title: LTXVideo
|
||||||
|
- local: api/pipelines/mochi
|
||||||
|
title: Mochi
|
||||||
|
- local: api/pipelines/pia
|
||||||
|
title: Personalized Image Animator (PIA)
|
||||||
|
- local: api/pipelines/skyreels_v2
|
||||||
|
title: SkyReels-V2
|
||||||
- local: api/pipelines/stable_diffusion/svd
|
- local: api/pipelines/stable_diffusion/svd
|
||||||
title: Image-to-video
|
title: Stable Video Diffusion
|
||||||
- local: api/pipelines/stable_diffusion/inpaint
|
- local: api/pipelines/text_to_video
|
||||||
title: Inpainting
|
title: Text-to-video
|
||||||
- local: api/pipelines/stable_diffusion/k_diffusion
|
- local: api/pipelines/text_to_video_zero
|
||||||
title: K-Diffusion
|
title: Text2Video-Zero
|
||||||
- local: api/pipelines/stable_diffusion/latent_upscale
|
- local: api/pipelines/wan
|
||||||
title: Latent upscaler
|
title: Wan
|
||||||
- local: api/pipelines/stable_diffusion/ldm3d_diffusion
|
title: Video
|
||||||
title: LDM3D Text-to-(RGB, Depth), Text-to-(RGB-pano, Depth-pano), LDM3D Upscaler
|
|
||||||
- local: api/pipelines/stable_diffusion/stable_diffusion_safe
|
|
||||||
title: Safe Stable Diffusion
|
|
||||||
- local: api/pipelines/stable_diffusion/sdxl_turbo
|
|
||||||
title: SDXL Turbo
|
|
||||||
- local: api/pipelines/stable_diffusion/stable_diffusion_2
|
|
||||||
title: Stable Diffusion 2
|
|
||||||
- local: api/pipelines/stable_diffusion/stable_diffusion_3
|
|
||||||
title: Stable Diffusion 3
|
|
||||||
- local: api/pipelines/stable_diffusion/stable_diffusion_xl
|
|
||||||
title: Stable Diffusion XL
|
|
||||||
- local: api/pipelines/stable_diffusion/upscale
|
|
||||||
title: Super-resolution
|
|
||||||
- local: api/pipelines/stable_diffusion/adapter
|
|
||||||
title: T2I-Adapter
|
|
||||||
- local: api/pipelines/stable_diffusion/text2img
|
|
||||||
title: Text-to-image
|
|
||||||
title: Stable Diffusion
|
|
||||||
- local: api/pipelines/stable_unclip
|
|
||||||
title: Stable unCLIP
|
|
||||||
- local: api/pipelines/text_to_video
|
|
||||||
title: Text-to-video
|
|
||||||
- local: api/pipelines/text_to_video_zero
|
|
||||||
title: Text2Video-Zero
|
|
||||||
- local: api/pipelines/unclip
|
|
||||||
title: unCLIP
|
|
||||||
- local: api/pipelines/unidiffuser
|
|
||||||
title: UniDiffuser
|
|
||||||
- local: api/pipelines/value_guided_sampling
|
|
||||||
title: Value-guided sampling
|
|
||||||
- local: api/pipelines/visualcloze
|
|
||||||
title: VisualCloze
|
|
||||||
- local: api/pipelines/wan
|
|
||||||
title: Wan
|
|
||||||
- local: api/pipelines/wuerstchen
|
|
||||||
title: Wuerstchen
|
|
||||||
title: Pipelines
|
title: Pipelines
|
||||||
- isExpanded: false
|
- sections:
|
||||||
sections:
|
|
||||||
- local: api/schedulers/overview
|
- local: api/schedulers/overview
|
||||||
title: Overview
|
title: Overview
|
||||||
- local: api/schedulers/cm_stochastic_iterative
|
- local: api/schedulers/cm_stochastic_iterative
|
||||||
@@ -651,8 +728,7 @@
|
|||||||
- local: api/schedulers/vq_diffusion
|
- local: api/schedulers/vq_diffusion
|
||||||
title: VQDiffusionScheduler
|
title: VQDiffusionScheduler
|
||||||
title: Schedulers
|
title: Schedulers
|
||||||
- isExpanded: false
|
- sections:
|
||||||
sections:
|
|
||||||
- local: api/internal_classes_overview
|
- local: api/internal_classes_overview
|
||||||
title: Overview
|
title: Overview
|
||||||
- local: api/attnprocessor
|
- local: api/attnprocessor
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
@@ -11,72 +11,26 @@ specific language governing permissions and limitations under the License. -->
|
|||||||
|
|
||||||
# Caching methods
|
# Caching methods
|
||||||
|
|
||||||
## Pyramid Attention Broadcast
|
Cache methods speedup diffusion transformers by storing and reusing intermediate outputs of specific layers, such as attention and feedforward layers, instead of recalculating them at each inference step.
|
||||||
|
|
||||||
[Pyramid Attention Broadcast](https://huggingface.co/papers/2408.12588) from Xuanlei Zhao, Xiaolong Jin, Kai Wang, Yang You.
|
## CacheMixin
|
||||||
|
|
||||||
Pyramid Attention Broadcast (PAB) is a method that speeds up inference in diffusion models by systematically skipping attention computations between successive inference steps and reusing cached attention states. The attention states are not very different between successive inference steps. The most prominent difference is in the spatial attention blocks, not as much in the temporal attention blocks, and finally the least in the cross attention blocks. Therefore, many cross attention computation blocks can be skipped, followed by the temporal and spatial attention blocks. By combining other techniques like sequence parallelism and classifier-free guidance parallelism, PAB achieves near real-time video generation.
|
|
||||||
|
|
||||||
Enable PAB with [`~PyramidAttentionBroadcastConfig`] on any pipeline. For some benchmarks, refer to [this](https://github.com/huggingface/diffusers/pull/9562) pull request.
|
|
||||||
|
|
||||||
```python
|
|
||||||
import torch
|
|
||||||
from diffusers import CogVideoXPipeline, PyramidAttentionBroadcastConfig
|
|
||||||
|
|
||||||
pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
|
|
||||||
pipe.to("cuda")
|
|
||||||
|
|
||||||
# Increasing the value of `spatial_attention_timestep_skip_range[0]` or decreasing the value of
|
|
||||||
# `spatial_attention_timestep_skip_range[1]` will decrease the interval in which pyramid attention
|
|
||||||
# broadcast is active, leader to slower inference speeds. However, large intervals can lead to
|
|
||||||
# poorer quality of generated videos.
|
|
||||||
config = PyramidAttentionBroadcastConfig(
|
|
||||||
spatial_attention_block_skip_range=2,
|
|
||||||
spatial_attention_timestep_skip_range=(100, 800),
|
|
||||||
current_timestep_callback=lambda: pipe.current_timestep,
|
|
||||||
)
|
|
||||||
pipe.transformer.enable_cache(config)
|
|
||||||
```
|
|
||||||
|
|
||||||
## Faster Cache
|
|
||||||
|
|
||||||
[FasterCache](https://huggingface.co/papers/2410.19355) from Zhengyao Lv, Chenyang Si, Junhao Song, Zhenyu Yang, Yu Qiao, Ziwei Liu, Kwan-Yee K. Wong.
|
|
||||||
|
|
||||||
FasterCache is a method that speeds up inference in diffusion transformers by:
|
|
||||||
- Reusing attention states between successive inference steps, due to high similarity between them
|
|
||||||
- Skipping unconditional branch prediction used in classifier-free guidance by revealing redundancies between unconditional and conditional branch outputs for the same timestep, and therefore approximating the unconditional branch output using the conditional branch output
|
|
||||||
|
|
||||||
```python
|
|
||||||
import torch
|
|
||||||
from diffusers import CogVideoXPipeline, FasterCacheConfig
|
|
||||||
|
|
||||||
pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
|
|
||||||
pipe.to("cuda")
|
|
||||||
|
|
||||||
config = FasterCacheConfig(
|
|
||||||
spatial_attention_block_skip_range=2,
|
|
||||||
spatial_attention_timestep_skip_range=(-1, 681),
|
|
||||||
current_timestep_callback=lambda: pipe.current_timestep,
|
|
||||||
attention_weight_callback=lambda _: 0.3,
|
|
||||||
unconditional_batch_skip_range=5,
|
|
||||||
unconditional_batch_timestep_skip_range=(-1, 781),
|
|
||||||
tensor_format="BFCHW",
|
|
||||||
)
|
|
||||||
pipe.transformer.enable_cache(config)
|
|
||||||
```
|
|
||||||
|
|
||||||
### CacheMixin
|
|
||||||
|
|
||||||
[[autodoc]] CacheMixin
|
[[autodoc]] CacheMixin
|
||||||
|
|
||||||
### PyramidAttentionBroadcastConfig
|
## PyramidAttentionBroadcastConfig
|
||||||
|
|
||||||
[[autodoc]] PyramidAttentionBroadcastConfig
|
[[autodoc]] PyramidAttentionBroadcastConfig
|
||||||
|
|
||||||
[[autodoc]] apply_pyramid_attention_broadcast
|
[[autodoc]] apply_pyramid_attention_broadcast
|
||||||
|
|
||||||
### FasterCacheConfig
|
## FasterCacheConfig
|
||||||
|
|
||||||
[[autodoc]] FasterCacheConfig
|
[[autodoc]] FasterCacheConfig
|
||||||
|
|
||||||
[[autodoc]] apply_faster_cache
|
[[autodoc]] apply_faster_cache
|
||||||
|
|
||||||
|
### FirstBlockCacheConfig
|
||||||
|
|
||||||
|
[[autodoc]] FirstBlockCacheConfig
|
||||||
|
|
||||||
|
[[autodoc]] apply_first_block_cache
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
@@ -14,11 +14,8 @@ specific language governing permissions and limitations under the License.
|
|||||||
|
|
||||||
Schedulers from [`~schedulers.scheduling_utils.SchedulerMixin`] and models from [`ModelMixin`] inherit from [`ConfigMixin`] which stores all the parameters that are passed to their respective `__init__` methods in a JSON-configuration file.
|
Schedulers from [`~schedulers.scheduling_utils.SchedulerMixin`] and models from [`ModelMixin`] inherit from [`ConfigMixin`] which stores all the parameters that are passed to their respective `__init__` methods in a JSON-configuration file.
|
||||||
|
|
||||||
<Tip>
|
> [!TIP]
|
||||||
|
> To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf auth login`.
|
||||||
To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `huggingface-cli login`.
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
## ConfigMixin
|
## ConfigMixin
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
@@ -20,6 +20,12 @@ All pipelines with [`VaeImageProcessor`] accept PIL Image, PyTorch tensor, or Nu
|
|||||||
|
|
||||||
[[autodoc]] image_processor.VaeImageProcessor
|
[[autodoc]] image_processor.VaeImageProcessor
|
||||||
|
|
||||||
|
## InpaintProcessor
|
||||||
|
|
||||||
|
The [`InpaintProcessor`] accepts `mask` and `image` inputs and process them together. Optionally, it can accept padding_mask_crop and apply mask overlay.
|
||||||
|
|
||||||
|
[[autodoc]] image_processor.InpaintProcessor
|
||||||
|
|
||||||
## VaeImageProcessorLDM3D
|
## VaeImageProcessorLDM3D
|
||||||
|
|
||||||
The [`VaeImageProcessorLDM3D`] accepts RGB and depth inputs and returns RGB and depth outputs.
|
The [`VaeImageProcessorLDM3D`] accepts RGB and depth inputs and returns RGB and depth outputs.
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
@@ -14,11 +14,8 @@ specific language governing permissions and limitations under the License.
|
|||||||
|
|
||||||
[IP-Adapter](https://hf.co/papers/2308.06721) is a lightweight adapter that enables prompting a diffusion model with an image. This method decouples the cross-attention layers of the image and text features. The image features are generated from an image encoder.
|
[IP-Adapter](https://hf.co/papers/2308.06721) is a lightweight adapter that enables prompting a diffusion model with an image. This method decouples the cross-attention layers of the image and text features. The image features are generated from an image encoder.
|
||||||
|
|
||||||
<Tip>
|
> [!TIP]
|
||||||
|
> Learn how to load and use an IP-Adapter checkpoint and image in the [IP-Adapter](../../using-diffusers/ip_adapter) guide,.
|
||||||
Learn how to load an IP-Adapter checkpoint and image in the IP-Adapter [loading](../../using-diffusers/loading_adapters#ip-adapter) guide, and you can see how to use it in the [usage](../../using-diffusers/ip_adapter) guide.
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
## IPAdapterMixin
|
## IPAdapterMixin
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
@@ -26,16 +26,19 @@ LoRA is a fast and lightweight training method that inserts and trains a signifi
|
|||||||
- [`HunyuanVideoLoraLoaderMixin`] provides similar functions for [HunyuanVideo](https://huggingface.co/docs/diffusers/main/en/api/pipelines/hunyuan_video).
|
- [`HunyuanVideoLoraLoaderMixin`] provides similar functions for [HunyuanVideo](https://huggingface.co/docs/diffusers/main/en/api/pipelines/hunyuan_video).
|
||||||
- [`Lumina2LoraLoaderMixin`] provides similar functions for [Lumina2](https://huggingface.co/docs/diffusers/main/en/api/pipelines/lumina2).
|
- [`Lumina2LoraLoaderMixin`] provides similar functions for [Lumina2](https://huggingface.co/docs/diffusers/main/en/api/pipelines/lumina2).
|
||||||
- [`WanLoraLoaderMixin`] provides similar functions for [Wan](https://huggingface.co/docs/diffusers/main/en/api/pipelines/wan).
|
- [`WanLoraLoaderMixin`] provides similar functions for [Wan](https://huggingface.co/docs/diffusers/main/en/api/pipelines/wan).
|
||||||
|
- [`SkyReelsV2LoraLoaderMixin`] provides similar functions for [SkyReels-V2](https://huggingface.co/docs/diffusers/main/en/api/pipelines/skyreels_v2).
|
||||||
- [`CogView4LoraLoaderMixin`] provides similar functions for [CogView4](https://huggingface.co/docs/diffusers/main/en/api/pipelines/cogview4).
|
- [`CogView4LoraLoaderMixin`] provides similar functions for [CogView4](https://huggingface.co/docs/diffusers/main/en/api/pipelines/cogview4).
|
||||||
- [`AmusedLoraLoaderMixin`] is for the [`AmusedPipeline`].
|
- [`AmusedLoraLoaderMixin`] is for the [`AmusedPipeline`].
|
||||||
- [`HiDreamImageLoraLoaderMixin`] provides similar functions for [HiDream Image](https://huggingface.co/docs/diffusers/main/en/api/pipelines/hidream)
|
- [`HiDreamImageLoraLoaderMixin`] provides similar functions for [HiDream Image](https://huggingface.co/docs/diffusers/main/en/api/pipelines/hidream)
|
||||||
|
- [`QwenImageLoraLoaderMixin`] provides similar functions for [Qwen Image](https://huggingface.co/docs/diffusers/main/en/api/pipelines/qwen)
|
||||||
- [`LoraBaseMixin`] provides a base class with several utility methods to fuse, unfuse, unload, LoRAs and more.
|
- [`LoraBaseMixin`] provides a base class with several utility methods to fuse, unfuse, unload, LoRAs and more.
|
||||||
|
|
||||||
<Tip>
|
> [!TIP]
|
||||||
|
> To learn more about how to load LoRA weights, see the [LoRA](../../tutorials/using_peft_for_inference) loading guide.
|
||||||
|
|
||||||
To learn more about how to load LoRA weights, see the [LoRA](../../using-diffusers/loading_adapters#lora) loading guide.
|
## LoraBaseMixin
|
||||||
|
|
||||||
</Tip>
|
[[autodoc]] loaders.lora_base.LoraBaseMixin
|
||||||
|
|
||||||
## StableDiffusionLoraLoaderMixin
|
## StableDiffusionLoraLoaderMixin
|
||||||
|
|
||||||
@@ -88,6 +91,10 @@ To learn more about how to load LoRA weights, see the [LoRA](../../using-diffuse
|
|||||||
|
|
||||||
[[autodoc]] loaders.lora_pipeline.WanLoraLoaderMixin
|
[[autodoc]] loaders.lora_pipeline.WanLoraLoaderMixin
|
||||||
|
|
||||||
|
## SkyReelsV2LoraLoaderMixin
|
||||||
|
|
||||||
|
[[autodoc]] loaders.lora_pipeline.SkyReelsV2LoraLoaderMixin
|
||||||
|
|
||||||
## AmusedLoraLoaderMixin
|
## AmusedLoraLoaderMixin
|
||||||
|
|
||||||
[[autodoc]] loaders.lora_pipeline.AmusedLoraLoaderMixin
|
[[autodoc]] loaders.lora_pipeline.AmusedLoraLoaderMixin
|
||||||
@@ -96,6 +103,13 @@ To learn more about how to load LoRA weights, see the [LoRA](../../using-diffuse
|
|||||||
|
|
||||||
[[autodoc]] loaders.lora_pipeline.HiDreamImageLoraLoaderMixin
|
[[autodoc]] loaders.lora_pipeline.HiDreamImageLoraLoaderMixin
|
||||||
|
|
||||||
|
## QwenImageLoraLoaderMixin
|
||||||
|
|
||||||
|
[[autodoc]] loaders.lora_pipeline.QwenImageLoraLoaderMixin
|
||||||
|
|
||||||
|
## KandinskyLoraLoaderMixin
|
||||||
|
[[autodoc]] loaders.lora_pipeline.KandinskyLoraLoaderMixin
|
||||||
|
|
||||||
## LoraBaseMixin
|
## LoraBaseMixin
|
||||||
|
|
||||||
[[autodoc]] loaders.lora_base.LoraBaseMixin
|
[[autodoc]] loaders.lora_base.LoraBaseMixin
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
@@ -12,13 +12,10 @@ specific language governing permissions and limitations under the License.
|
|||||||
|
|
||||||
# PEFT
|
# PEFT
|
||||||
|
|
||||||
Diffusers supports loading adapters such as [LoRA](../../using-diffusers/loading_adapters) with the [PEFT](https://huggingface.co/docs/peft/index) library with the [`~loaders.peft.PeftAdapterMixin`] class. This allows modeling classes in Diffusers like [`UNet2DConditionModel`], [`SD3Transformer2DModel`] to operate with an adapter.
|
Diffusers supports loading adapters such as [LoRA](../../tutorials/using_peft_for_inference) with the [PEFT](https://huggingface.co/docs/peft/index) library with the [`~loaders.peft.PeftAdapterMixin`] class. This allows modeling classes in Diffusers like [`UNet2DConditionModel`], [`SD3Transformer2DModel`] to operate with an adapter.
|
||||||
|
|
||||||
<Tip>
|
> [!TIP]
|
||||||
|
> Refer to the [Inference with PEFT](../../tutorials/using_peft_for_inference.md) tutorial for an overview of how to use PEFT in Diffusers for inference.
|
||||||
Refer to the [Inference with PEFT](../../tutorials/using_peft_for_inference.md) tutorial for an overview of how to use PEFT in Diffusers for inference.
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
## PeftAdapterMixin
|
## PeftAdapterMixin
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
@@ -16,11 +16,8 @@ Textual Inversion is a training method for personalizing models by learning new
|
|||||||
|
|
||||||
[`TextualInversionLoaderMixin`] provides a function for loading Textual Inversion embeddings from Diffusers and Automatic1111 into the text encoder and loading a special token to activate the embeddings.
|
[`TextualInversionLoaderMixin`] provides a function for loading Textual Inversion embeddings from Diffusers and Automatic1111 into the text encoder and loading a special token to activate the embeddings.
|
||||||
|
|
||||||
<Tip>
|
> [!TIP]
|
||||||
|
> To learn more about how to load Textual Inversion embeddings, see the [Textual Inversion](../../using-diffusers/textual_inversion_inference) loading guide.
|
||||||
To learn more about how to load Textual Inversion embeddings, see the [Textual Inversion](../../using-diffusers/loading_adapters#textual-inversion) loading guide.
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
## TextualInversionLoaderMixin
|
## TextualInversionLoaderMixin
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
@@ -16,11 +16,8 @@ This class is useful when *only* loading weights into a [`SD3Transformer2DModel`
|
|||||||
|
|
||||||
The [`SD3Transformer2DLoadersMixin`] class currently only loads IP-Adapter weights, but will be used in the future to save weights and load LoRAs.
|
The [`SD3Transformer2DLoadersMixin`] class currently only loads IP-Adapter weights, but will be used in the future to save weights and load LoRAs.
|
||||||
|
|
||||||
<Tip>
|
> [!TIP]
|
||||||
|
> To learn more about how to load LoRA weights, see the [LoRA](../../tutorials/using_peft_for_inference) loading guide.
|
||||||
To learn more about how to load LoRA weights, see the [LoRA](../../using-diffusers/loading_adapters#lora) loading guide.
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
## SD3Transformer2DLoadersMixin
|
## SD3Transformer2DLoadersMixin
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
@@ -16,11 +16,8 @@ Some training methods - like LoRA and Custom Diffusion - typically target the UN
|
|||||||
|
|
||||||
The [`UNet2DConditionLoadersMixin`] class provides functions for loading and saving weights, fusing and unfusing LoRAs, disabling and enabling LoRAs, and setting and deleting adapters.
|
The [`UNet2DConditionLoadersMixin`] class provides functions for loading and saving weights, fusing and unfusing LoRAs, disabling and enabling LoRAs, and setting and deleting adapters.
|
||||||
|
|
||||||
<Tip>
|
> [!TIP]
|
||||||
|
> To learn more about how to load LoRA weights, see the [LoRA](../../tutorials/using_peft_for_inference) guide.
|
||||||
To learn more about how to load LoRA weights, see the [LoRA](../../using-diffusers/loading_adapters#lora) loading guide.
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
## UNet2DConditionLoadersMixin
|
## UNet2DConditionLoadersMixin
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
@@ -39,7 +39,7 @@ mask_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images
|
|||||||
original_image = load_image(img_url).resize((512, 512))
|
original_image = load_image(img_url).resize((512, 512))
|
||||||
mask_image = load_image(mask_url).resize((512, 512))
|
mask_image = load_image(mask_url).resize((512, 512))
|
||||||
|
|
||||||
pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
|
pipe = StableDiffusionInpaintPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-inpainting")
|
||||||
pipe.vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
|
pipe.vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
|
||||||
pipe.to("cuda")
|
pipe.to("cuda")
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
32
docs/source/en/api/models/autoencoder_kl_hunyuanimage.md
Normal file
32
docs/source/en/api/models/autoencoder_kl_hunyuanimage.md
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License. -->
|
||||||
|
|
||||||
|
# AutoencoderKLHunyuanImage
|
||||||
|
|
||||||
|
The 2D variational autoencoder (VAE) model with KL loss used in [HunyuanImage2.1].
|
||||||
|
|
||||||
|
The model can be loaded with the following code snippet.
|
||||||
|
|
||||||
|
```python
|
||||||
|
from diffusers import AutoencoderKLHunyuanImage
|
||||||
|
|
||||||
|
vae = AutoencoderKLHunyuanImage.from_pretrained("hunyuanvideo-community/HunyuanImage-2.1-Diffusers", subfolder="vae", torch_dtype=torch.bfloat16)
|
||||||
|
```
|
||||||
|
|
||||||
|
## AutoencoderKLHunyuanImage
|
||||||
|
|
||||||
|
[[autodoc]] AutoencoderKLHunyuanImage
|
||||||
|
- decode
|
||||||
|
- all
|
||||||
|
|
||||||
|
## DecoderOutput
|
||||||
|
|
||||||
|
[[autodoc]] models.autoencoders.vae.DecoderOutput
|
||||||
@@ -0,0 +1,32 @@
|
|||||||
|
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License. -->
|
||||||
|
|
||||||
|
# AutoencoderKLHunyuanImageRefiner
|
||||||
|
|
||||||
|
The 3D variational autoencoder (VAE) model with KL loss used in [HunyuanImage2.1](https://github.com/Tencent-Hunyuan/HunyuanImage-2.1) for its refiner pipeline.
|
||||||
|
|
||||||
|
The model can be loaded with the following code snippet.
|
||||||
|
|
||||||
|
```python
|
||||||
|
from diffusers import AutoencoderKLHunyuanImageRefiner
|
||||||
|
|
||||||
|
vae = AutoencoderKLHunyuanImageRefiner.from_pretrained("hunyuanvideo-community/HunyuanImage-2.1-Refiner-Diffusers", subfolder="vae", torch_dtype=torch.bfloat16)
|
||||||
|
```
|
||||||
|
|
||||||
|
## AutoencoderKLHunyuanImageRefiner
|
||||||
|
|
||||||
|
[[autodoc]] AutoencoderKLHunyuanImageRefiner
|
||||||
|
- decode
|
||||||
|
- all
|
||||||
|
|
||||||
|
## DecoderOutput
|
||||||
|
|
||||||
|
[[autodoc]] models.autoencoders.vae.DecoderOutput
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
@@ -44,15 +44,3 @@ model = AutoencoderKL.from_single_file(url)
|
|||||||
## DecoderOutput
|
## DecoderOutput
|
||||||
|
|
||||||
[[autodoc]] models.autoencoders.vae.DecoderOutput
|
[[autodoc]] models.autoencoders.vae.DecoderOutput
|
||||||
|
|
||||||
## FlaxAutoencoderKL
|
|
||||||
|
|
||||||
[[autodoc]] FlaxAutoencoderKL
|
|
||||||
|
|
||||||
## FlaxAutoencoderKLOutput
|
|
||||||
|
|
||||||
[[autodoc]] models.vae_flax.FlaxAutoencoderKLOutput
|
|
||||||
|
|
||||||
## FlaxDecoderOutput
|
|
||||||
|
|
||||||
[[autodoc]] models.vae_flax.FlaxDecoderOutput
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
35
docs/source/en/api/models/autoencoderkl_qwenimage.md
Normal file
35
docs/source/en/api/models/autoencoderkl_qwenimage.md
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License. -->
|
||||||
|
|
||||||
|
# AutoencoderKLQwenImage
|
||||||
|
|
||||||
|
The model can be loaded with the following code snippet.
|
||||||
|
|
||||||
|
```python
|
||||||
|
from diffusers import AutoencoderKLQwenImage
|
||||||
|
|
||||||
|
vae = AutoencoderKLQwenImage.from_pretrained("Qwen/QwenImage-20B", subfolder="vae")
|
||||||
|
```
|
||||||
|
|
||||||
|
## AutoencoderKLQwenImage
|
||||||
|
|
||||||
|
[[autodoc]] AutoencoderKLQwenImage
|
||||||
|
- decode
|
||||||
|
- encode
|
||||||
|
- all
|
||||||
|
|
||||||
|
## AutoencoderKLOutput
|
||||||
|
|
||||||
|
[[autodoc]] models.autoencoders.autoencoder_kl.AutoencoderKLOutput
|
||||||
|
|
||||||
|
## DecoderOutput
|
||||||
|
|
||||||
|
[[autodoc]] models.autoencoders.vae.DecoderOutput
|
||||||
19
docs/source/en/api/models/bria_transformer.md
Normal file
19
docs/source/en/api/models/bria_transformer.md
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License.
|
||||||
|
-->
|
||||||
|
|
||||||
|
# BriaTransformer2DModel
|
||||||
|
|
||||||
|
A modified flux Transformer model from [Bria](https://huggingface.co/briaai/BRIA-3.2)
|
||||||
|
|
||||||
|
## BriaTransformer2DModel
|
||||||
|
|
||||||
|
[[autodoc]] BriaTransformer2DModel
|
||||||
19
docs/source/en/api/models/chroma_transformer.md
Normal file
19
docs/source/en/api/models/chroma_transformer.md
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License.
|
||||||
|
-->
|
||||||
|
|
||||||
|
# ChromaTransformer2DModel
|
||||||
|
|
||||||
|
A modified flux Transformer model from [Chroma](https://huggingface.co/lodestones/Chroma1-HD)
|
||||||
|
|
||||||
|
## ChromaTransformer2DModel
|
||||||
|
|
||||||
|
[[autodoc]] ChromaTransformer2DModel
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
@@ -16,11 +16,8 @@ Consistency decoder can be used to decode the latents from the denoising UNet in
|
|||||||
|
|
||||||
The original codebase can be found at [openai/consistencydecoder](https://github.com/openai/consistencydecoder).
|
The original codebase can be found at [openai/consistencydecoder](https://github.com/openai/consistencydecoder).
|
||||||
|
|
||||||
<Tip warning={true}>
|
> [!WARNING]
|
||||||
|
> Inference is only supported for 2 iterations as of now.
|
||||||
Inference is only supported for 2 iterations as of now.
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
The pipeline could not have been contributed without the help of [madebyollin](https://github.com/madebyollin) and [mrsteyk](https://github.com/mrsteyk) from [this issue](https://github.com/openai/consistencydecoder/issues/1).
|
The pipeline could not have been contributed without the help of [madebyollin](https://github.com/madebyollin) and [mrsteyk](https://github.com/mrsteyk) from [this issue](https://github.com/openai/consistencydecoder/issues/1).
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
@@ -40,11 +40,3 @@ pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=contro
|
|||||||
## ControlNetOutput
|
## ControlNetOutput
|
||||||
|
|
||||||
[[autodoc]] models.controlnets.controlnet.ControlNetOutput
|
[[autodoc]] models.controlnets.controlnet.ControlNetOutput
|
||||||
|
|
||||||
## FlaxControlNetModel
|
|
||||||
|
|
||||||
[[autodoc]] FlaxControlNetModel
|
|
||||||
|
|
||||||
## FlaxControlNetOutput
|
|
||||||
|
|
||||||
[[autodoc]] models.controlnets.controlnet_flax.FlaxControlNetOutput
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team and The InstantX Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team and The InstantX Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team and Tencent Hunyuan Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team and Tencent Hunyuan Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
<!--Copyright 2024 The HuggingFace Team and The InstantX Team. All rights reserved.
|
<!--Copyright 2025 The HuggingFace Team and The InstantX Team. All rights reserved.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
the License. You may obtain a copy of the License at
|
the License. You may obtain a copy of the License at
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user