Mirror of https://github.com/huggingface/diffusers.git (synced 2026-02-08 11:55:18 +08:00)
Compare commits: 412 commits, move-testi...remove-exp
| Author | SHA1 | Date |
|---|---|---|
| | b24f1c084f | |
| | 765eb50ff1 | |
| | 7ad97d492d | |
| | 1ecfbfe12b | |
| | d7fa445453 | |
| | 7feb4fc791 | |
| | 3c70440d26 | |
| | 7299121413 | |
| | 3114f6a796 | |
| | 463367d31d | |
| | 987412b252 | |
| | b30be7d90f | |
| | 4cbe1aad54 | |
| | aca3b7845b | |
| | 9d68742214 | |
| | 1426c33aa5 | |
| | 34388bdaa8 | |
| | 5ee4e19c58 | |
| | f1a93c765f | |
| | 337ac577af | |
| | beede725d1 | |
| | 29a930a142 | |
| | 4b020c5213 | |
| | 3a0efa38f5 | |
| | 78233be7b4 | |
| | 53a943dca6 | |
| | e60485498b | |
| | 19558cbb15 | |
| | 0d5218856c | |
| | 4e1ce3d417 | |
| | f2ced21349 | |
| | 7192d4b5ff | |
| | a72f61a7ab | |
| | f364948359 | |
| | 2b1f19d56b | |
| | 8063353819 | |
| | d77d61bae4 | |
| | a9af091700 | |
| | db627652b1 | |
| | f9f6758533 | |
| | 6983485eed | |
| | dad5cb55e6 | |
| | c13b264589 | |
| | ac3bd4baee | |
| | e47af9bada | |
| | 7407adabdc | |
| | 8390581b5d | |
| | a2a6abc0d6 | |
| | b86bd99eac | |
| | 5b202111bf | |
| | 4ac2b4a521 | |
| | 418313bbf6 | |
| | 2120c3096f | |
| | ed6e5ecf67 | |
| | d44b5f86e6 | |
| | 02c7adc356 | |
| | a3cc0e7a52 | |
| | 2a6cdc0b3e | |
| | 1791306739 | |
| | df6516a716 | |
| | 5794ffffbe | |
| | 4fb44bdf91 | |
| | b7a81582ae | |
| | 4b64b5603f | |
| | 2bb640f8ea | |
| | 2dc9d2af50 | |
| | 57e57cfae0 | |
| | 644169433f | |
| | 632765a5ee | |
| | d36564f06a | |
| | 441b69eabf | |
| | d568c9773f | |
| | 3981c955ce | |
| | 1903383e94 | |
| | 08f8b7af9a | |
| | 2f66edc880 | |
| | be38f41f9f | |
| | 91e5134175 | |
| | a812c87465 | |
| | 8b9f817ef5 | |
| | b1f06b780a | |
| | 8600b4c10d | |
| | c10bdd9b73 | |
| | dab000e88b | |
| | 9fb6b89d49 | |
| | 6fb4c99f5a | |
| | 961b9b27d3 | |
| | 8f30bfff1f | |
| | b4be29bda2 | |
| | 98479a94c2 | |
| | ade1059ae2 | |
| | 41a6e86faf | |
| | 9b5a244653 | |
| | 417f6b2d33 | |
| | e46354d2d0 | |
| | db37140474 | |
| | 88ffb00139 | |
| | b6098ca006 | |
| | 7c6d314549 | |
| | 3138e37fe6 | |
| | 0da1aa90b5 | |
| | 5ffb65803d | |
| | d0ae34d313 | |
| | 47378066c0 | |
| | 208cda8f6d | |
| | 1cdb8723b8 | |
| | f6b6a7181e | |
| | 52766e6a69 | |
| | 973a077c6a | |
| | 0c4f6c9cff | |
| | 262ce19bff | |
| | f7753b1bc8 | |
| | b5309683cb | |
| | 55463f7ace | |
| | f9c1e612fb | |
| | 87f7d11143 | |
| | 5e48f466b9 | |
| | a748a839ad | |
| | 58519283e7 | |
| | 0c1ccc0775 | |
| | b8a4cbac14 | |
| | 17c0e79dbd | |
| | 1567243463 | |
| | 0eac64c7a6 | |
| | 10e820a2dd | |
| | 6708f5c76d | |
| | be3c2a0667 | |
| | 8b4722de57 | |
| | 07ea0786e8 | |
| | 54fa0745c3 | |
| | 3d02cd543e | |
| | 2246d2c7c4 | |
| | 671149e036 | |
| | f67639b0bb | |
| | 5a74319715 | |
| | 6290fdfda4 | |
| | 256e010674 | |
| | 8430ac2a2f | |
| | bb9e713d02 | |
| | c98c157a9e | |
| | f12d161d67 | |
| | 8d415a6f48 | |
| | 7de51b826c | |
| | cd00ba685b | |
| | 2842c14c5f | |
| | c318686090 | |
| | 6028613226 | |
| | a1f36ee3ef | |
| | d96cbacacd | |
| | 5ab5946931 | |
| | d0c54e5563 | |
| | 1908c47600 | |
| | 759ea58708 | |
| | f48f9c250f | |
| | 3c05b9f71c | |
| | 9379b2391b | |
| | 4f136f842c | |
| | edf36f5128 | |
| | 564079f295 | |
| | 394a48d169 | |
| | 99784ae0d2 | |
| | fffd964a0f | |
| | 859b809031 | |
| | d769d8a13b | |
| | c25582d509 | |
| | 6156cf8f22 | |
| | 152f7ca357 | |
| | b010a8ce0c | |
| | 1b91856d0e | |
| | 01e355516b | |
| | 6bf668c4d2 | |
| | e6d4612309 | |
| | a88a7b4f03 | |
| | c8656ed73c | |
| | 94c9613f99 | |
| | b91e8c0d0b | |
| | ac7864624b | |
| | 5ffb73d4ae | |
| | 4088e8a851 | |
| | d33d9f6715 | |
| | dde8754ba2 | |
| | fbcd3ba6b2 | |
| | d176f61fcf | |
| | 354d35adb0 | |
| | 544ba677dd | |
| | 6f1042e36c | |
| | d5da453de5 | |
| | 15370f8412 | |
| | a96b145304 | |
| | 6d8973ffe2 | |
| | ab71f3c864 | |
| | b7df4a5387 | |
| | 67dc65e2e3 | |
| | 3579fdabf9 | |
| | 1afc21855e | |
| | 0c35b580fe | |
| | 01a56927f1 | |
| | a9e4883b6a | |
| | 63dd601758 | |
| | eeae0338e7 | |
| | 3c1ca869d7 | |
| | 6fe4a6ff8e | |
| | 40de88af8c | |
| | 6a2309b98d | |
| | cd3bbe2910 | |
| | 7a001c3ee2 | |
| | d8e4805816 | |
| | 44c3101685 | |
| | d6c63bb956 | |
| | 2f44d63046 | |
| | f3db38c1e7 | |
| | f5e5f34823 | |
| | 093cd3f040 | |
| | aecf0c53bf | |
| | 0c7589293b | |
| | ff263947ad | |
| | 66e6a0215f | |
| | 5a47442f92 | |
| | 8f6328c4a4 | |
| | 8d45f219d0 | |
| | 0fd58c7706 | |
| | 35d703310c | |
| | b455dc94a2 | |
| | 04f9d2bf3d | |
| | bc8fd864eb | |
| | a9cb08af39 | |
| | 9f669e7b5d | |
| | 8ac17cd2cb | |
| | e4393fa613 | |
| | b3e9dfced7 | |
| | 58f3771545 | |
| | 6198f8a12b | |
| | dcfb18a2d3 | |
| | ac5a1e28fc | |
| | 325a95051b | |
| | 1ec28a2c77 | |
| | de6173c683 | |
| | 8f80dda193 | |
| | cdbf0ad883 | |
| | 5e8415a311 | |
| | e68c936f42 | |
| | 051c8a1c0f | |
| | d54622c267 | |
| | df8dd77817 | |
| | 9f3c0fdcd8 | |
| | 84e16575e4 | |
| | 55d49d4379 | |
| | 40528e9ae7 | |
| | dc622a95d0 | |
| | ecfbc8f952 | |
| | df0e2a4f2c | |
| | 303efd2b8d | |
| | dccc206e35 | |
| | 5afbcce176 | |
| | 6f2ded53a1 | |
| | 6d2a80c14b | |
| | 6d1a648602 | |
| | 219a8ab031 | |
| | 3a00e23f5a | |
| | 250f5cb53d | |
| | 19fe63170c | |
| | 41381b1bb1 | |
| | bcada5bfaf | |
| | 4490e4cc44 | |
| | 27c1ac49b4 | |
| | 585c32b304 | |
| | dc6bd1511a | |
| | ca5afaebca | |
| | 6c066f0e13 | |
| | fbb25a05be | |
| | 500b9cf184 | |
| | d34b18c783 | |
| | 7536f647e4 | |
| | a138d71ec1 | |
| | bc4039886d | |
| | 9c3b58dcf1 | |
| | 74b5fed434 | |
| | 85eb505672 | |
| | ccdd96ca52 | |
| | 4c723d8ec3 | |
| | bec2d8eaea | |
| | a0a51eb098 | |
| | a5a0ccf86a | |
| | fbc4c998ed | |
| | 56d2986d5d | |
| | a33ef355f6 | |
| | 85b7478fe9 | |
| | d1e6ffffad | |
| | 61c6eae207 | |
| | dd07b19e27 | |
| | a076cd8e16 | |
| | 2b72beefe7 | |
| | 11bf2cf1d1 | |
| | 19921e9362 | |
| | 5aa4f1dc55 | |
| | 922e273e6b | |
| | 57636ad4f4 | |
| | cefc2cf82d | |
| | b3e56e71fb | |
| | 5b5fa49a89 | |
| | decfa3c9e1 | |
| | 48305755bf | |
| | 7853bfbed7 | |
| | 23ebbb4bc8 | |
| | 1b456bd5d5 | |
| | af769881d3 | |
| | 4715c5c769 | |
| | dbe413668d | |
| | 26475082cb | |
| | f072c64bf2 | |
| | aed636f5f0 | |
| | 53a10518b9 | |
| | b4e6dc3037 | |
| | 3eb40786ca | |
| | a4bc845478 | |
| | fa468c5d57 | |
| | 8abc7aeb71 | |
| | 693d8a3a52 | |
| | a9df12ab45 | |
| | a519272d97 | |
| | 345864eb85 | |
| | 35e538d46a | |
| | 2dc31677e1 | |
| | 1066de8c69 | |
| | 2d69bacb00 | |
| | 0974b4c606 | |
| | cf4b97b233 | |
| | 7f3e9b8695 | |
| | ce90f9b2db | |
| | c3675d4c9b | |
| | 2b7deffe36 | |
| | 941ac9c3d9 | |
| | 7242b5ff62 | |
| | b4297967a0 | |
| | 9ae5b6299d | |
| | 814d710e56 | |
| | cc5b31ffc9 | |
| | d7a1a0363f | |
| | b59654544b | |
| | 0e12ba7454 | |
| | 20fd00b14b | |
| | 76d4e416bc | |
| | c07fcf780a | |
| | ccedeca96e | |
| | 64a5187d96 | |
| | 0a151115bb | |
| | 19085ac8f4 | |
| | 041501aea9 | |
| | 9c0944581a | |
| | 4588bbeb42 | |
| | ec5449f3a1 | |
| | 310fdaf556 | |
| | dcb6dd9b7a | |
| | 043ab2520f | |
| | 08c29020dd | |
| | 7a58734994 | |
| | 9ef118509e | |
| | 7c54a7b38a | |
| | 09e777a3e1 | |
| | a72bc0c4bb | |
| | 80de641c1c | |
| | 76810eca2b | |
| | 1448b03585 | |
| | 5796735015 | |
| | d8310a8fca | |
| | 78031c2938 | |
| | d83d35c1bb | |
| | 843355f89f | |
| | c006a95df1 | |
| | df267ee4e8 | |
| | edd614ea38 | |
| | 7e7e62c6ff | |
| | eda9ff8300 | |
| | efb7a299af | |
| | d06750a5fd | |
| | 8c72cd12ee | |
| | 751e250f70 | |
| | b50014067d | |
| | f5c113e439 | |
| | 5e181eddfe | |
| | 55f0b3d758 | |
| | eb7ef26736 | |
| | e1b7f1f240 | |
| | 9e7ae568d6 | |
| | f7b79452b4 | |
| | 43459079ab | |
| | 4067d6c4b6 | |
| | 28106fcac4 | |
| | c222570a9b | |
| | 4e36bb0d23 | |
| | f50b18eec7 | |
| | fc337d5853 | |
| | 32798bf242 | |
| | c2e5ece08b | |
| | 764b62473a | |
| | 6682956333 | |
| | ffc8c0c1e1 | |
| | 4acbfbf13b | |
| | 6549b04ec6 | |
| | 130fd8df54 | |
| | bcd4d77ba6 | |
| | 006d092751 | |
| | 9e4a75b142 | |
| | 0ff1aa910c | |
| | 901da9dccc | |
| | 67ffa7031e | |
| | 827fad66a0 | |
| | 9b721db205 | |
| | ba0e732eb0 | |
| | b2da59b197 | |
| | 7aa6af1138 | |
| | 87b800e154 | |
.github/workflows/benchmark.yml (vendored, 11 changes)

@@ -7,7 +7,7 @@ on:
 env:
   DIFFUSERS_IS_CI: yes
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   HF_HOME: /mnt/cache
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
@@ -28,7 +28,7 @@ jobs:
       options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 2
      - name: NVIDIA-SMI
@@ -38,9 +38,8 @@ jobs:
        run: |
          apt update
          apt install -y libpq-dev postgresql-client
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          python -m uv pip install -r benchmarks/requirements.txt
+          uv pip install -e ".[quality]"
+          uv pip install -r benchmarks/requirements.txt
      - name: Environment
        run: |
          python utils/print_env.py
@@ -59,7 +58,7 @@ jobs:

      - name: Test suite reports artifacts
        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: benchmark_test_reports
          path: benchmarks/${{ env.BASE_PATH }}
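A recurring change in this diff, and in the workflows below, is dropping the manual virtualenv bootstrap (`python -m venv /opt/venv && export PATH=...` followed by `python -m uv pip ...`) in favor of calling `uv pip` directly. A minimal sketch of the new install step, assuming the container image ships `uv` and a Python environment it can target (plain `uv pip install` refuses to touch the system interpreter unless `--system` or `UV_SYSTEM_PYTHON=1` is set); the container image name is taken from these diffs, everything else is illustrative:

```yaml
jobs:
  example:
    runs-on: ubuntu-22.04
    container:
      image: diffusers/diffusers-pytorch-cpu  # assumed to provide uv and a usable Python env
    steps:
      - uses: actions/checkout@v6
      - name: Install dependencies
        run: |
          # no explicit `python -m venv` bootstrap needed;
          # uv installs into the environment the image provides
          uv pip install -e ".[quality]"
          uv pip install pytest-reportlog
```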
.github/workflows/build_docker_images.yml (vendored, 38 changes)

@@ -28,7 +28,7 @@ jobs:
        uses: docker/setup-buildx-action@v1

      - name: Check out code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6

      - name: Find Changed Dockerfiles
        id: file_changes
@@ -42,19 +42,40 @@ jobs:
          CHANGED_FILES: ${{ steps.file_changes.outputs.all }}
        run: |
          echo "$CHANGED_FILES"
+          ALLOWED_IMAGES=(
+            diffusers-pytorch-cpu
+            diffusers-pytorch-cuda
+            diffusers-pytorch-xformers-cuda
+            diffusers-pytorch-minimum-cuda
+            diffusers-doc-builder
+          )
+
+          declare -A IMAGES_TO_BUILD=()
+
          for FILE in $CHANGED_FILES; do
            # skip anything that isn't still on disk
-            if [[ ! -f "$FILE" ]]; then
+            if [[ ! -e "$FILE" ]]; then
              echo "Skipping removed file $FILE"
              continue
            fi
-            if [[ "$FILE" == docker/*Dockerfile ]]; then
-              DOCKER_PATH="${FILE%/Dockerfile}"
-              DOCKER_TAG=$(basename "$DOCKER_PATH")
-              echo "Building Docker image for $DOCKER_TAG"
-              docker build -t "$DOCKER_TAG" "$DOCKER_PATH"
-            fi
+            for IMAGE in "${ALLOWED_IMAGES[@]}"; do
+              if [[ "$FILE" == docker/${IMAGE}/* ]]; then
+                IMAGES_TO_BUILD["$IMAGE"]=1
+              fi
+            done
          done
+
+          if [[ ${#IMAGES_TO_BUILD[@]} -eq 0 ]]; then
+            echo "No relevant Docker changes detected."
+            exit 0
+          fi
+
+          for IMAGE in "${!IMAGES_TO_BUILD[@]}"; do
+            DOCKER_PATH="docker/${IMAGE}"
+            echo "Building Docker image for $IMAGE"
+            docker build -t "$IMAGE" "$DOCKER_PATH"
+          done
        if: steps.file_changes.outputs.all != ''

  build-and-push-docker-images:
@@ -72,14 +93,13 @@ jobs:
        image-name:
          - diffusers-pytorch-cpu
          - diffusers-pytorch-cuda
-          - diffusers-pytorch-cuda
          - diffusers-pytorch-xformers-cuda
          - diffusers-pytorch-minimum-cuda
          - diffusers-doc-builder

    steps:
      - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to Docker Hub
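The rewritten step above filters changed files against an image allowlist and deduplicates with a Bash associative array, so several edits under one image directory trigger a single build, and files outside `docker/<image>/` trigger none. A condensed sketch of that pattern; `CHANGED_FILES` here is a hypothetical stand-in for `${{ steps.file_changes.outputs.all }}`, and `entrypoint.sh` is an invented example filename:

```yaml
- name: Build only images whose directories changed
  env:
    CHANGED_FILES: "docker/diffusers-pytorch-cpu/Dockerfile docker/diffusers-pytorch-cpu/entrypoint.sh"
  run: |
    ALLOWED_IMAGES=(diffusers-pytorch-cpu diffusers-pytorch-cuda)
    declare -A IMAGES_TO_BUILD=()   # associative-array keys act as a set
    for FILE in $CHANGED_FILES; do
      for IMAGE in "${ALLOWED_IMAGES[@]}"; do
        # any change under docker/<image>/ marks that image, at most once
        if [[ "$FILE" == docker/${IMAGE}/* ]]; then
          IMAGES_TO_BUILD["$IMAGE"]=1
        fi
      done
    done
    for IMAGE in "${!IMAGES_TO_BUILD[@]}"; do
      docker build -t "$IMAGE" "docker/${IMAGE}"   # one build per marked image
    done
```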
.github/workflows/build_pr_documentation.yml (vendored, 26 changes)

@@ -12,7 +12,33 @@ concurrency:
   cancel-in-progress: true

 jobs:
+  check-links:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v6
+
+      - name: Set up Python
+        uses: actions/setup-python@v6
+        with:
+          python-version: '3.10'
+
+      - name: Install uv
+        run: |
+          curl -LsSf https://astral.sh/uv/install.sh | sh
+          echo "$HOME/.cargo/bin" >> $GITHUB_PATH
+
+      - name: Install doc-builder
+        run: |
+          uv pip install --system git+https://github.com/huggingface/doc-builder.git@main
+
+      - name: Check documentation links
+        run: |
+          uv run doc-builder check-links docs/source/en
+
   build:
+    needs: check-links
     uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
     with:
       commit_sha: ${{ github.event.pull_request.head.sha }}
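The new `check-links` job is wired in as a gate: `build` now declares `needs: check-links`, so the PR documentation is only built after the link check succeeds. A minimal sketch of that `needs:` dependency mechanism, with hypothetical job names and placeholder step bodies:

```yaml
on: pull_request

jobs:
  check-links:
    runs-on: ubuntu-latest
    steps:
      - run: echo "validate links"   # placeholder body
  build:
    needs: check-links               # runs only if check-links succeeded
    runs-on: ubuntu-latest
    steps:
      - run: echo "build docs"       # placeholder body
```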
.github/workflows/codeql.yml (vendored, new file, 22 changes)

@@ -0,0 +1,22 @@
+---
+name: CodeQL Security Analysis For Github Actions
+
+on:
+  push:
+    branches: ["main"]
+  workflow_dispatch:
+  # pull_request:
+
+jobs:
+  codeql:
+    name: CodeQL Analysis
+    uses: huggingface/security-workflows/.github/workflows/codeql-reusable.yml@v1
+    permissions:
+      security-events: write
+      packages: read
+      actions: read
+      contents: read
+    with:
+      languages: '["actions","python"]'
+      queries: 'security-extended,security-and-quality'
+      runner: 'ubuntu-latest' #optional if need custom runner
.github/workflows/mirror_community_pipeline.yml (vendored, 29 changes)

@@ -24,7 +24,6 @@ jobs:
   mirror_community_pipeline:
     env:
       SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_COMMUNITY_MIRROR }}
-
     runs-on: ubuntu-22.04
     steps:
      # Checkout to correct ref
@@ -39,42 +38,46 @@ jobs:
      # If ref is 'refs/heads/main' => set 'main'
      # Else it must be a tag => set {tag}
      - name: Set checkout_ref and path_in_repo
+        env:
+          EVENT_NAME: ${{ github.event_name }}
+          EVENT_INPUT_REF: ${{ github.event.inputs.ref }}
+          GITHUB_REF: ${{ github.ref }}
        run: |
-          if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
-            if [ -z "${{ github.event.inputs.ref }}" ]; then
+          if [ "$EVENT_NAME" == "workflow_dispatch" ]; then
+            if [ -z "$EVENT_INPUT_REF" ]; then
              echo "Error: Missing ref input"
              exit 1
-            elif [ "${{ github.event.inputs.ref }}" == "main" ]; then
+            elif [ "$EVENT_INPUT_REF" == "main" ]; then
              echo "CHECKOUT_REF=refs/heads/main" >> $GITHUB_ENV
              echo "PATH_IN_REPO=main" >> $GITHUB_ENV
            else
-              echo "CHECKOUT_REF=refs/tags/${{ github.event.inputs.ref }}" >> $GITHUB_ENV
-              echo "PATH_IN_REPO=${{ github.event.inputs.ref }}" >> $GITHUB_ENV
+              echo "CHECKOUT_REF=refs/tags/$EVENT_INPUT_REF" >> $GITHUB_ENV
+              echo "PATH_IN_REPO=$EVENT_INPUT_REF" >> $GITHUB_ENV
            fi
-          elif [ "${{ github.ref }}" == "refs/heads/main" ]; then
-            echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
+          elif [ "$GITHUB_REF" == "refs/heads/main" ]; then
+            echo "CHECKOUT_REF=$GITHUB_REF" >> $GITHUB_ENV
            echo "PATH_IN_REPO=main" >> $GITHUB_ENV
          else
            # e.g. refs/tags/v0.28.1 -> v0.28.1
-            echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
-            echo "PATH_IN_REPO=$(echo ${{ github.ref }} | sed 's/^refs\/tags\///')" >> $GITHUB_ENV
+            echo "CHECKOUT_REF=$GITHUB_REF" >> $GITHUB_ENV
+            echo "PATH_IN_REPO=$(echo $GITHUB_REF | sed 's/^refs\/tags\///')" >> $GITHUB_ENV
          fi
      - name: Print env vars
        run: |
          echo "CHECKOUT_REF: ${{ env.CHECKOUT_REF }}"
          echo "PATH_IN_REPO: ${{ env.PATH_IN_REPO }}"
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v6
        with:
          ref: ${{ env.CHECKOUT_REF }}

      # Setup + install dependencies
      - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v6
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
-          python -m pip install --upgrade pip
+          pip install --upgrade pip
          pip install --upgrade huggingface_hub

      # Check secret is set
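The refactor above moves `${{ github.event.inputs.ref }}` and friends out of inline shell and into the step's `env:` block. Because `${{ }}` expressions are substituted into the script text before the shell ever runs, an attacker-controlled ref could otherwise inject shell commands; reading the value through an environment variable keeps it inert data. A minimal sketch of the pattern (the input name and echo are illustrative):

```yaml
on:
  workflow_dispatch:
    inputs:
      ref:
        description: "Tag or branch to mirror"   # illustrative input

jobs:
  safe-shell:
    runs-on: ubuntu-latest
    steps:
      - name: Use untrusted input via env, not inline expansion
        env:
          EVENT_INPUT_REF: ${{ github.event.inputs.ref }}  # expanded into env, not into script text
        run: |
          # "$EVENT_INPUT_REF" is ordinary data to the shell here, so a
          # value like `"; rm -rf /` cannot break out of the quotes.
          echo "Mirroring ref: $EVENT_INPUT_REF"
```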
.github/workflows/nightly_tests.yml (vendored, 164 changes)

@@ -7,7 +7,7 @@ on:
 env:
   DIFFUSERS_IS_CI: yes
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
   PYTEST_TIMEOUT: 600
@@ -28,7 +28,7 @@ jobs:
      pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
    steps:
      - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 2
      - name: Install dependencies
@@ -44,7 +44,7 @@ jobs:

      - name: Pipeline Tests Artifacts
        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: test-pipelines.json
          path: reports
@@ -64,17 +64,18 @@ jobs:
      options: --shm-size "16gb" --ipc host --gpus all
    steps:
      - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 2
      - name: NVIDIA-SMI
        run: nvidia-smi
      - name: Install dependencies
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
-          python -m uv pip install pytest-reportlog
+          uv pip install -e ".[quality]"
+          uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
+          uv pip install pytest-reportlog
      - name: Environment
        run: |
          python utils/print_env.py
@@ -84,8 +85,8 @@ jobs:
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx" \
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+            -k "not Flax and not Onnx" \
            --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
            --report-log=tests_pipeline_${{ matrix.module }}_cuda.log \
            tests/pipelines/${{ matrix.module }}
@@ -96,7 +97,7 @@ jobs:
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: pipeline_${{ matrix.module }}_test_reports
          path: reports
@@ -118,17 +119,18 @@ jobs:
        module: [models, schedulers, lora, others, single_file, examples]
    steps:
      - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
-          python -m uv pip install pytest-reportlog
+          uv pip install -e ".[quality]"
+          uv pip install peft@git+https://github.com/huggingface/peft.git
+          uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
+          uv pip install pytest-reportlog
      - name: Environment
        run: python utils/print_env.py
@@ -139,8 +141,8 @@ jobs:
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx" \
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+            -k "not Flax and not Onnx" \
            --make-reports=tests_torch_${{ matrix.module }}_cuda \
            --report-log=tests_torch_${{ matrix.module }}_cuda.log \
            tests/${{ matrix.module }}
@@ -152,8 +154,8 @@ jobs:
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-            -s -v --make-reports=examples_torch_cuda \
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+            --make-reports=examples_torch_cuda \
            --report-log=examples_torch_cuda.log \
            examples/
@@ -165,7 +167,7 @@ jobs:

      - name: Test suite reports artifacts
        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: torch_${{ matrix.module }}_cuda_test_reports
          path: reports
@@ -182,7 +184,7 @@ jobs:

    steps:
      - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 2

@@ -191,8 +193,9 @@ jobs:
          nvidia-smi
      - name: Install dependencies
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test,training]
+          uv pip install -e ".[quality,training]"
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
      - name: Environment
        run: |
          python utils/print_env.py
@@ -201,14 +204,14 @@ jobs:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          RUN_COMPILE: yes
        run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile -k "compile" --make-reports=tests_torch_compile_cuda tests/
      - name: Failure short reports
        if: ${{ failure() }}
        run: cat reports/tests_torch_compile_cuda_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: torch_compile_test_reports
          path: reports
@@ -225,18 +228,19 @@ jobs:
      options: --shm-size "16gb" --ipc host --gpus all
    steps:
      - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 2
      - name: NVIDIA-SMI
        run: nvidia-smi
      - name: Install dependencies
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
-          python -m uv pip install pytest-reportlog
+          uv pip install -e ".[quality]"
+          uv pip install peft@git+https://github.com/huggingface/peft.git
+          uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
+          uv pip install pytest-reportlog
      - name: Environment
        run: |
          python utils/print_env.py
@@ -247,7 +251,7 @@ jobs:
          CUBLAS_WORKSPACE_CONFIG: :16:8
          BIG_GPU_MEMORY: 40
        run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -m "big_accelerator" \
            --make-reports=tests_big_gpu_torch_cuda \
            --report-log=tests_big_gpu_torch_cuda.log \
@@ -259,7 +263,7 @@ jobs:
          cat reports/tests_big_gpu_torch_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: torch_cuda_big_gpu_test_reports
          path: reports
@@ -276,16 +280,17 @@ jobs:
        shell: bash
    steps:
      - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+          uv pip install -e ".[quality]"
+          uv pip install peft@git+https://github.com/huggingface/peft.git
+          uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1

      - name: Environment
        run: |
@@ -297,8 +302,8 @@ jobs:
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx" \
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+            -k "not Flax and not Onnx" \
            --make-reports=tests_torch_minimum_version_cuda \
            tests/models/test_modeling_common.py \
            tests/pipelines/test_pipelines_common.py \
@@ -316,7 +321,7 @@ jobs:

      - name: Test suite reports artifacts
        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: torch_minimum_version_cuda_test_reports
          path: reports
@@ -340,6 +345,9 @@ jobs:
          - backend: "optimum_quanto"
            test_location: "quanto"
            additional_deps: []
+          - backend: "nvidia_modelopt"
+            test_location: "modelopt"
+            additional_deps: []
    runs-on:
      group: aws-g6e-xlarge-plus
    container:
@@ -347,20 +355,21 @@ jobs:
      options: --shm-size "20gb" --ipc host --gpus all
    steps:
      - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 2
      - name: NVIDIA-SMI
        run: nvidia-smi
      - name: Install dependencies
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          python -m uv pip install -U ${{ matrix.config.backend }}
+          uv pip install -e ".[quality]"
+          uv pip install -U ${{ matrix.config.backend }}
          if [ "${{ join(matrix.config.additional_deps, ' ') }}" != "" ]; then
-            python -m uv pip install ${{ join(matrix.config.additional_deps, ' ') }}
+            uv pip install ${{ join(matrix.config.additional_deps, ' ') }}
          fi
-          python -m uv pip install pytest-reportlog
+          uv pip install pytest-reportlog
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
      - name: Environment
        run: |
          python utils/print_env.py
@@ -371,7 +380,7 @@ jobs:
          CUBLAS_WORKSPACE_CONFIG: :16:8
          BIG_GPU_MEMORY: 40
        run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            --make-reports=tests_${{ matrix.config.backend }}_torch_cuda \
            --report-log=tests_${{ matrix.config.backend }}_torch_cuda.log \
            tests/quantization/${{ matrix.config.test_location }}
@@ -382,7 +391,7 @@ jobs:
          cat reports/tests_${{ matrix.config.backend }}_torch_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: torch_cuda_${{ matrix.config.backend }}_reports
          path: reports
@@ -399,17 +408,18 @@ jobs:
      options: --shm-size "20gb" --ipc host --gpus all
    steps:
      - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 2
      - name: NVIDIA-SMI
        run: nvidia-smi
      - name: Install dependencies
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          python -m uv pip install -U bitsandbytes optimum_quanto
-          python -m uv pip install pytest-reportlog
+          uv pip install -e ".[quality]"
+          uv pip install -U bitsandbytes optimum_quanto
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
+          uv pip install pytest-reportlog
      - name: Environment
        run: |
          python utils/print_env.py
@@ -420,7 +430,7 @@ jobs:
          CUBLAS_WORKSPACE_CONFIG: :16:8
          BIG_GPU_MEMORY: 40
        run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            --make-reports=tests_pipeline_level_quant_torch_cuda \
            --report-log=tests_pipeline_level_quant_torch_cuda.log \
            tests/quantization/test_pipeline_level_quantization.py
@@ -431,7 +441,7 @@ jobs:
          cat reports/tests_pipeline_level_quant_torch_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: torch_cuda_pipeline_level_quant_reports
          path: reports
@@ -456,7 +466,7 @@ jobs:
      image: diffusers/diffusers-pytorch-cpu
    steps:
      - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 2

@@ -464,7 +474,7 @@ jobs:
        run: mkdir -p combined_reports

      - name: Download all test reports
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v7
        with:
          path: artifacts

@@ -490,7 +500,7 @@ jobs:
          cat $CONSOLIDATED_REPORT_PATH >> $GITHUB_STEP_SUMMARY

      - name: Upload consolidated report
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: consolidated_test_report
          path: ${{ env.CONSOLIDATED_REPORT_PATH }}
@@ -504,7 +514,7 @@ jobs:
#
#   steps:
#     - name: Checkout diffusers
-#       uses: actions/checkout@v3
+#       uses: actions/checkout@v6
#       with:
#         fetch-depth: 2
#
@@ -520,11 +530,11 @@ jobs:
#     - name: Install dependencies
#       shell: arch -arch arm64 bash {0}
#       run: |
-#         ${CONDA_RUN} python -m pip install --upgrade pip uv
-#         ${CONDA_RUN} python -m uv pip install -e [quality,test]
-#         ${CONDA_RUN} python -m uv pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
-#         ${CONDA_RUN} python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate
-#         ${CONDA_RUN} python -m uv pip install pytest-reportlog
+#         ${CONDA_RUN} pip install --upgrade pip uv
+#         ${CONDA_RUN} uv pip install -e ".[quality]"
+#         ${CONDA_RUN} uv pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
+#         ${CONDA_RUN} uv pip install accelerate@git+https://github.com/huggingface/accelerate
+#         ${CONDA_RUN} uv pip install pytest-reportlog
#     - name: Environment
#       shell: arch -arch arm64 bash {0}
#       run: |
@@ -535,7 +545,7 @@ jobs:
#         HF_HOME: /System/Volumes/Data/mnt/cache
#         HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
#       run: |
-#         ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps \
+#         ${CONDA_RUN} pytest -n 1 --make-reports=tests_torch_mps \
#           --report-log=tests_torch_mps.log \
#           tests/
#     - name: Failure short reports
@@ -544,7 +554,7 @@ jobs:
#
#     - name: Test suite reports artifacts
#       if: ${{ always() }}
-#       uses: actions/upload-artifact@v4
+#       uses: actions/upload-artifact@v6
#       with:
#         name: torch_mps_test_reports
#         path: reports
@@ -560,7 +570,7 @@ jobs:
#
#   steps:
#     - name: Checkout diffusers
-#       uses: actions/checkout@v3
+#       uses: actions/checkout@v6
#       with:
#         fetch-depth: 2
#
@@ -576,11 +586,11 @@ jobs:
#     - name: Install dependencies
#       shell: arch -arch arm64 bash {0}
#       run: |
-#         ${CONDA_RUN} python -m pip install --upgrade pip uv
-#         ${CONDA_RUN} python -m uv pip install -e [quality,test]
-#         ${CONDA_RUN} python -m uv pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
-#         ${CONDA_RUN} python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate
-#         ${CONDA_RUN} python -m uv pip install pytest-reportlog
+#         ${CONDA_RUN} pip install --upgrade pip uv
+#         ${CONDA_RUN} uv pip install -e ".[quality]"
+#         ${CONDA_RUN} uv pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
+#         ${CONDA_RUN} uv pip install accelerate@git+https://github.com/huggingface/accelerate
+#         ${CONDA_RUN} uv pip install pytest-reportlog
#     - name: Environment
#       shell: arch -arch arm64 bash {0}
#       run: |
@@ -591,7 +601,7 @@ jobs:
#         HF_HOME: /System/Volumes/Data/mnt/cache
#         HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
#       run: |
-#         ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps \
+#         ${CONDA_RUN} pytest -n 1 --make-reports=tests_torch_mps \
#           --report-log=tests_torch_mps.log \
#           tests/
#     - name: Failure short reports
@@ -600,7 +610,7 @@ jobs:
#
#     - name: Test suite reports artifacts
#       if: ${{ always() }}
-#       uses: actions/upload-artifact@v4
+#       uses: actions/upload-artifact@v6
#       with:
#         name: torch_mps_test_reports
#         path: reports
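Across these nightly jobs the previous transformers-from-main install is commented out and replaced with a hard pin, `uv pip install transformers==4.57.1`, with `huggingface_hub` uninstalled alongside it so the pinned release pulls in its own compatible hub version rather than whatever the image shipped. A minimal sketch of that pin-with-clean-slate step, under the same assumptions as the uv sketch earlier:

```yaml
- name: Pin transformers for reproducible nightly runs
  run: |
    # remove whatever the image shipped with, then install the pinned release,
    # letting it resolve a compatible huggingface_hub on its own
    uv pip uninstall transformers huggingface_hub
    uv pip install transformers==4.57.1
    # previous behaviour, kept commented in the workflows (tracked main instead of a pin):
    # uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
```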
(workflow file name not captured in this view)

@@ -10,12 +10,12 @@ jobs:
    runs-on: ubuntu-22.04

    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v6

      - name: Setup Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v6
        with:
-          python-version: '3.8'
+          python-version: '3.10'

      - name: Notify Slack about the release
        env:
.github/workflows/pr_dependency_test.yml (vendored, 13 changes)

@@ -18,18 +18,15 @@ jobs:
  check_dependencies:
    runs-on: ubuntu-22.04
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v6
      - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v6
        with:
-          python-version: "3.8"
+          python-version: "3.10"
      - name: Install dependencies
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m pip install --upgrade pip uv
-          python -m uv pip install -e .
-          python -m uv pip install pytest
+          pip install -e .
+          pip install pytest
      - name: Check for soft dependencies
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          pytest tests/others/test_dependencies.py
.github/workflows/pr_modular_tests.yml (vendored, 58 changes)

@@ -1,3 +1,4 @@
+
 name: Fast PR tests for Modular

 on:
@@ -26,7 +27,7 @@ concurrency:

 env:
   DIFFUSERS_IS_CI: yes
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   OMP_NUM_THREADS: 4
   MKL_NUM_THREADS: 4
   PYTEST_TIMEOUT: 60
@@ -35,14 +36,14 @@ jobs:
  check_code_quality:
    runs-on: ubuntu-22.04
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v6
      - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v6
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
-          python -m pip install --upgrade pip
+          pip install --upgrade pip
          pip install .[quality]
      - name: Check quality
        run: make quality
@@ -55,14 +56,14 @@ jobs:
    needs: check_code_quality
    runs-on: ubuntu-22.04
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v6
      - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v6
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
-          python -m pip install --upgrade pip
+          pip install --upgrade pip
          pip install .[quality]
      - name: Check repo consistency
        run: |
@@ -77,23 +78,13 @@ jobs:

  run_fast_tests:
    needs: [check_code_quality, check_repository_consistency]
-    strategy:
-      fail-fast: false
-      matrix:
-        config:
-          - name: Fast PyTorch Modular Pipeline CPU tests
-            framework: pytorch_pipelines
-            runner: aws-highmemory-32-plus
-            image: diffusers/diffusers-pytorch-cpu
-            report: torch_cpu_modular_pipelines
-
-    name: ${{ matrix.config.name }}
-
+    name: Fast PyTorch Modular Pipeline CPU tests
    runs-on:
-      group: ${{ matrix.config.runner }}
+      group: aws-highmemory-32-plus
    container:
-      image: ${{ matrix.config.image }}
+      image: diffusers/diffusers-pytorch-cpu
      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

    defaults:
@@ -102,40 +93,35 @@ jobs:

    steps:
      - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
+          uv pip install -e ".[quality]"
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
+          uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps

      - name: Environment
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run fast PyTorch Pipeline CPU tests
-        if: ${{ matrix.config.framework == 'pytorch_pipelines' }}
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx" \
-            --make-reports=tests_${{ matrix.config.report }} \
+          pytest -n 8 --max-worker-restart=0 --dist=loadfile \
+            -k "not Flax and not Onnx" \
+            --make-reports=tests_torch_cpu_modular_pipelines \
            tests/modular_pipelines

      - name: Failure short reports
        if: ${{ failure() }}
-        run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt
+        run: cat reports/tests_torch_cpu_modular_pipelines_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
-          name: pr_${{ matrix.config.framework }}_${{ matrix.config.report }}_test_reports
+          name: pr_pytorch_pipelines_torch_cpu_modular_pipelines_test_reports
          path: reports
.github/workflows/pr_test_fetcher.yml (vendored, 31 changes)

@@ -28,13 +28,12 @@ jobs:
      test_map: ${{ steps.set_matrix.outputs.test_map }}
    steps:
      - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 0
      - name: Install dependencies
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
+          uv pip install -e ".[quality]"
      - name: Environment
        run: |
          python utils/print_env.py
@@ -43,7 +42,7 @@ jobs:
        run: |
          python utils/tests_fetcher.py | tee test_preparation.txt
      - name: Report fetched tests
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v6
        with:
          name: test_fetched
          path: test_preparation.txt
@@ -84,25 +83,22 @@ jobs:
        shell: bash
    steps:
      - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m pip install -e [quality,test]
-          python -m pip install accelerate
+          uv pip install -e ".[quality]"
+          uv pip install accelerate

      - name: Environment
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run all selected tests on CPU
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m pytest -n 2 --dist=loadfile -v --make-reports=${{ matrix.modules }}_tests_cpu ${{ fromJson(needs.setup_pr_tests.outputs.test_map)[matrix.modules] }}
+          pytest -n 2 --dist=loadfile -v --make-reports=${{ matrix.modules }}_tests_cpu ${{ fromJson(needs.setup_pr_tests.outputs.test_map)[matrix.modules] }}

      - name: Failure short reports
        if: ${{ failure() }}
@@ -113,7 +109,7 @@ jobs:

      - name: Test suite reports artifacts
        if: ${{ always() }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v6
        with:
          name: ${{ matrix.modules }}_test_reports
          path: reports
@@ -142,25 +138,22 @@ jobs:

    steps:
      - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m pip install -e [quality,test]
+          pip install -e [quality]

      - name: Environment
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run Hub tests for models, schedulers, and pipelines on a staging env
        if: ${{ matrix.config.framework == 'hub_tests_pytorch' }}
        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          HUGGINGFACE_CO_STAGING=true python -m pytest \
+          HUGGINGFACE_CO_STAGING=true pytest \
            -m "is_staging_test" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests
@@ -171,7 +164,7 @@ jobs:

      - name: Test suite reports artifacts
        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: pr_${{ matrix.config.report }}_test_reports
          path: reports
|||||||
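The same migration repeats through this file and most of the ones below: the per-step `python -m venv /opt/venv` bootstrap plus the `python -m uv pip install -e [quality,test]` spelling collapses into a single bare `uv pip install`. A sketch of the new-style step, under the assumption that the runner or container image ships uv with an active default environment (the job name is hypothetical):

jobs:
  example_tests:  # hypothetical job name
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v6
        with:
          fetch-depth: 2
      - name: Install dependencies
        run: |
          # assumes uv and a default environment are preconfigured on the image,
          # so no venv creation or PATH export is needed in every step
          uv pip install -e ".[quality]"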
.github/workflows/pr_tests.yml (83 changes)

@@ -22,7 +22,7 @@ concurrency:

 env:
   DIFFUSERS_IS_CI: yes
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   OMP_NUM_THREADS: 4
   MKL_NUM_THREADS: 4
   PYTEST_TIMEOUT: 60
@@ -31,14 +31,14 @@ jobs:
   check_code_quality:
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v6
       - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v6
         with:
-          python-version: "3.8"
+          python-version: "3.10"
       - name: Install dependencies
         run: |
-          python -m pip install --upgrade pip
+          pip install --upgrade pip
           pip install .[quality]
       - name: Check quality
         run: make quality
@@ -51,14 +51,14 @@ jobs:
     needs: check_code_quality
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v6
       - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v6
         with:
-          python-version: "3.8"
+          python-version: "3.10"
       - name: Install dependencies
         run: |
-          python -m pip install --upgrade pip
+          pip install --upgrade pip
           pip install .[quality]
       - name: Check repo consistency
         run: |
@@ -108,46 +108,42 @@ jobs:

     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2

       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
+          uv pip install -e ".[quality]"
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
+          uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps

       - name: Environment
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python utils/print_env.py

       - name: Run fast PyTorch Pipeline CPU tests
         if: ${{ matrix.config.framework == 'pytorch_pipelines' }}
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx" \
+          pytest -n 8 --max-worker-restart=0 --dist=loadfile \
+            -k "not Flax and not Onnx" \
             --make-reports=tests_${{ matrix.config.report }} \
             tests/pipelines

       - name: Run fast PyTorch Model Scheduler CPU tests
         if: ${{ matrix.config.framework == 'pytorch_models' }}
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx and not Dependency" \
+          pytest -n 4 --max-worker-restart=0 --dist=loadfile \
+            -k "not Flax and not Onnx and not Dependency" \
             --make-reports=tests_${{ matrix.config.report }} \
             tests/models tests/schedulers tests/others

       - name: Run example PyTorch CPU tests
         if: ${{ matrix.config.framework == 'pytorch_examples' }}
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install peft timm
-          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
+          uv pip install ".[training]"
+          pytest -n 4 --max-worker-restart=0 --dist=loadfile \
             --make-reports=tests_${{ matrix.config.report }} \
             examples

@@ -157,7 +153,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: pr_${{ matrix.config.framework }}_${{ matrix.config.report }}_test_reports
           path: reports
@@ -189,25 +185,22 @@ jobs:

     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2

       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
+          uv pip install -e ".[quality]"

       - name: Environment
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python utils/print_env.py

       - name: Run Hub tests for models, schedulers, and pipelines on a staging env
         if: ${{ matrix.config.framework == 'hub_tests_pytorch' }}
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          HUGGINGFACE_CO_STAGING=true python -m pytest \
+          HUGGINGFACE_CO_STAGING=true pytest \
             -m "is_staging_test" \
             --make-reports=tests_${{ matrix.config.report }} \
             tests
@@ -218,7 +211,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: pr_${{ matrix.config.report }}_test_reports
           path: reports
@@ -243,34 +236,32 @@ jobs:

     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2

       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
+          uv pip install -e ".[quality]"
           # TODO (sayakpaul, DN6): revisit `--no-deps`
-          python -m pip install -U peft@git+https://github.com/huggingface/peft.git --no-deps
-          python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
-          python -m uv pip install -U tokenizers
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
+          uv pip install -U peft@git+https://github.com/huggingface/peft.git --no-deps
+          uv pip install -U tokenizers
+          uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1

       - name: Environment
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python utils/print_env.py

       - name: Run fast PyTorch LoRA tests with PEFT
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
-            -s -v \
+          pytest -n 4 --max-worker-restart=0 --dist=loadfile \
+            \
             --make-reports=tests_peft_main \
             tests/lora/
-          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
-            -s -v \
+          pytest -n 4 --max-worker-restart=0 --dist=loadfile \
+            \
             --make-reports=tests_models_lora_peft_main \
             tests/models/ -k "lora"

@@ -282,7 +273,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: pr_main_test_reports
           path: reports
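Worth noting in the install steps above: the pinned `transformers==4.57.1` line is active while the install-from-main variant is kept as a comment, so CI can be flipped back to tracking transformers' main branch by swapping the two lines. In isolation the pinning step looks like this (step name illustrative, commands as in the diff):

      - name: Pin transformers for CI
        run: |
          # remove whatever the base image ships, then install the pinned release;
          # swap the comment below to test against transformers main instead
          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1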
.github/workflows/pr_tests_gpu.yml (79 changes)

@@ -24,7 +24,7 @@ env:
   DIFFUSERS_IS_CI: yes
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   PYTEST_TIMEOUT: 600
   PIPELINE_USAGE_CUTOFF: 1000000000 # set high cutoff so that only always-test pipelines run

@@ -32,14 +32,14 @@ jobs:
   check_code_quality:
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v6
       - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v6
         with:
-          python-version: "3.8"
+          python-version: "3.10"
       - name: Install dependencies
         run: |
-          python -m pip install --upgrade pip
+          pip install --upgrade pip
           pip install .[quality]
       - name: Check quality
         run: make quality
@@ -52,14 +52,14 @@ jobs:
     needs: check_code_quality
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v6
      - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v6
         with:
-          python-version: "3.8"
+          python-version: "3.10"
       - name: Install dependencies
         run: |
-          python -m pip install --upgrade pip
+          pip install --upgrade pip
           pip install .[quality]
       - name: Check repo consistency
         run: |
@@ -83,13 +83,12 @@ jobs:
       pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2
       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
+          uv pip install -e ".[quality]"
       - name: Environment
         run: |
           python utils/print_env.py
@@ -101,7 +100,7 @@ jobs:
           echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
       - name: Pipeline Tests Artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: test-pipelines.json
           path: reports
@@ -121,7 +120,7 @@ jobs:
     options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2

@@ -130,10 +129,10 @@ jobs:
         nvidia-smi
       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
-          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
+          uv pip install -e ".[quality]"
+          uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1

       - name: Environment
         run: |
@@ -152,14 +151,14 @@ jobs:
           CUBLAS_WORKSPACE_CONFIG: :16:8
         run: |
           if [ "${{ matrix.module }}" = "ip_adapters" ]; then
-            python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-              -s -v -k "not Flax and not Onnx" \
+            pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+              -k "not Flax and not Onnx" \
               --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
               tests/pipelines/${{ matrix.module }}
           else
             pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
-            python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-              -s -v -k "not Flax and not Onnx and $pattern" \
+            pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+              -k "not Flax and not Onnx and $pattern" \
               --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
               tests/pipelines/${{ matrix.module }}
           fi
@@ -171,7 +170,7 @@ jobs:
           cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: pipeline_${{ matrix.module }}_test_reports
           path: reports
@@ -194,17 +193,17 @@ jobs:
         module: [models, schedulers, lora, others]
     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2

       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
-          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
+          uv pip install -e ".[quality]"
+          uv pip install peft@git+https://github.com/huggingface/peft.git
+          uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1

       - name: Environment
         run: |
@@ -225,10 +224,10 @@ jobs:
         run: |
           pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
           if [ -z "$pattern" ]; then
-            python -m pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx" tests/${{ matrix.module }} \
+            pytest -n 1 --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx" tests/${{ matrix.module }} \
               --make-reports=tests_torch_cuda_${{ matrix.module }}
           else
-            python -m pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx and $pattern" tests/${{ matrix.module }} \
+            pytest -n 1 --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx and $pattern" tests/${{ matrix.module }} \
               --make-reports=tests_torch_cuda_${{ matrix.module }}
           fi

@@ -240,7 +239,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: torch_cuda_test_reports_${{ matrix.module }}
           path: reports
@@ -256,7 +255,7 @@ jobs:
     options: --gpus all --shm-size "16gb" --ipc host
     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2

@@ -265,22 +264,20 @@ jobs:
         nvidia-smi
       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
-          python -m uv pip install -e [quality,test,training]
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
+          uv pip install -e ".[quality,training]"

       - name: Environment
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python utils/print_env.py

       - name: Run example tests on GPU
         env:
           HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install timm
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
+          uv pip install ".[training]"
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile --make-reports=examples_torch_cuda examples/

       - name: Failure short reports
         if: ${{ failure() }}
@@ -290,7 +287,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: examples_test_reports
           path: reports
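The GPU jobs above assemble their pytest `-k` expression at runtime: a fixed exclusion ("not Flax and not Onnx") is ANDed with a per-module pattern read from the file produced by the `extract_tests` step. A standalone sketch of that shell logic; `pattern.txt` is a hypothetical stand-in for `${{ steps.extract_tests.outputs.pattern_file }}`:

      - name: Run selected pipeline tests
        run: |
          pattern=$(cat pattern.txt)  # hypothetical pattern file
          if [ -z "$pattern" ]; then
            # no per-module pattern: run everything except the Flax/Onnx variants
            pytest -n 1 -k "not Flax and not Onnx" tests/pipelines
          else
            # narrow further to test ids matching the fetched pattern
            pytest -n 1 -k "not Flax and not Onnx and $pattern" tests/pipelines
          fi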
.github/workflows/pr_torch_dependency_test.yml (14 changes)

@@ -18,19 +18,15 @@ jobs:
   check_torch_dependencies:
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v6
       - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v6
         with:
-          python-version: "3.8"
+          python-version: "3.10"
       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m pip install --upgrade pip uv
-          python -m uv pip install -e .
-          python -m uv pip install torch torchvision torchaudio
-          python -m uv pip install pytest
+          pip install -e .
+          pip install torch torchvision torchaudio pytest
       - name: Check for soft dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           pytest tests/others/test_dependencies.py
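Unlike the other files, this one drops uv in favor of plain pip; the job only needs a throwaway environment to confirm that the soft-dependency checks pass. Applying the hunk above, the job reduces to roughly the following (reconstructed from the diff; the workflow's trigger configuration is omitted):

jobs:
  check_torch_dependencies:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v6
      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          pip install -e .
          pip install torch torchvision torchaudio pytest
      - name: Check for soft dependencies
        run: |
          pytest tests/others/test_dependencies.py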
.github/workflows/push_tests.yml (74 changes)

@@ -14,7 +14,7 @@ env:
   DIFFUSERS_IS_CI: yes
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   PYTEST_TIMEOUT: 600
   PIPELINE_USAGE_CUTOFF: 50000

@@ -29,13 +29,12 @@ jobs:
       pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2
       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
+          uv pip install -e ".[quality]"
       - name: Environment
         run: |
           python utils/print_env.py
@@ -47,7 +46,7 @@ jobs:
           echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
       - name: Pipeline Tests Artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: test-pipelines.json
           path: reports
@@ -67,7 +66,7 @@ jobs:
     options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2
       - name: NVIDIA-SMI
@@ -75,9 +74,10 @@ jobs:
         nvidia-smi
       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+          uv pip install -e ".[quality]"
+          uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
       - name: Environment
         run: |
           python utils/print_env.py
@@ -87,8 +87,8 @@ jobs:
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
           CUBLAS_WORKSPACE_CONFIG: :16:8
         run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx" \
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+            -k "not Flax and not Onnx" \
             --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
             tests/pipelines/${{ matrix.module }}
       - name: Failure short reports
@@ -98,7 +98,7 @@ jobs:
           cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: pipeline_${{ matrix.module }}_test_reports
           path: reports
@@ -120,16 +120,17 @@ jobs:
         module: [models, schedulers, lora, others, single_file]
     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2

       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+          uv pip install -e ".[quality]"
+          uv pip install peft@git+https://github.com/huggingface/peft.git
+          uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1

       - name: Environment
         run: |
@@ -141,8 +142,8 @@ jobs:
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
           CUBLAS_WORKSPACE_CONFIG: :16:8
         run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx" \
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+            -k "not Flax and not Onnx" \
             --make-reports=tests_torch_cuda_${{ matrix.module }} \
             tests/${{ matrix.module }}

@@ -154,7 +155,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: torch_cuda_test_reports_${{ matrix.module }}
           path: reports
@@ -171,7 +172,7 @@ jobs:

     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2

@@ -180,8 +181,9 @@ jobs:
         nvidia-smi
       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test,training]
+          uv pip install -e ".[quality,training]"
+          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
+          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
       - name: Environment
         run: |
           python utils/print_env.py
@@ -190,14 +192,14 @@ jobs:
           HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
           RUN_COMPILE: yes
         run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile -k "compile" --make-reports=tests_torch_compile_cuda tests/
       - name: Failure short reports
         if: ${{ failure() }}
         run: cat reports/tests_torch_compile_cuda_failures_short.txt

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: torch_compile_test_reports
           path: reports
@@ -214,7 +216,7 @@ jobs:

     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2

@@ -223,8 +225,7 @@ jobs:
         nvidia-smi
       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test,training]
+          uv pip install -e ".[quality,training]"
       - name: Environment
         run: |
           python utils/print_env.py
@@ -232,14 +233,14 @@ jobs:
         env:
           HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
         run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
       - name: Failure short reports
         if: ${{ failure() }}
         run: cat reports/tests_torch_xformers_cuda_failures_short.txt

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: torch_xformers_test_reports
           path: reports
@@ -255,7 +256,7 @@ jobs:
     options: --gpus all --shm-size "16gb" --ipc host
     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2

@@ -264,21 +265,18 @@ jobs:
         nvidia-smi
       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test,training]
+          uv pip install -e ".[quality,training]"

       - name: Environment
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python utils/print_env.py

       - name: Run example tests on GPU
         env:
           HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install timm
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
+          uv pip install ".[training]"
+          pytest -n 1 --max-worker-restart=0 --dist=loadfile --make-reports=examples_torch_cuda examples/

       - name: Failure short reports
         if: ${{ failure() }}
@@ -288,7 +286,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: examples_test_reports
           path: reports
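A detail behind the blanket upload-artifact@v4 → @v6 bumps: as of the v4 rewrite of the action, artifacts are immutable and names must be unique within a workflow run, which is why these jobs interpolate matrix values into every artifact name. The closing pattern each job uses (taken from the hunks above):

      - name: Test suite reports artifacts
        if: ${{ always() }}  # upload reports even when the test step failed
        uses: actions/upload-artifact@v6
        with:
          # the matrix value keeps the artifact name unique across parallel jobs
          name: torch_cuda_test_reports_${{ matrix.module }}
          path: reports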
.github/workflows/push_tests_fast.yml (20 changes)

@@ -18,7 +18,7 @@ env:
   HF_HOME: /mnt/cache
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   PYTEST_TIMEOUT: 600
   RUN_SLOW: no

@@ -54,35 +54,31 @@ jobs:

     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2

       - name: Install dependencies
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
+          uv pip install -e ".[quality]"

       - name: Environment
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python utils/print_env.py

       - name: Run fast PyTorch CPU tests
         if: ${{ matrix.config.framework == 'pytorch' }}
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx" \
+          pytest -n 4 --max-worker-restart=0 --dist=loadfile \
+            -k "not Flax and not Onnx" \
             --make-reports=tests_${{ matrix.config.report }} \
             tests/

       - name: Run example PyTorch CPU tests
         if: ${{ matrix.config.framework == 'pytorch_examples' }}
         run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install peft timm
-          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
+          uv pip install ".[training]"
+          pytest -n 4 --max-worker-restart=0 --dist=loadfile \
             --make-reports=tests_${{ matrix.config.report }} \
             examples

@@ -92,7 +88,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: pr_${{ matrix.config.report }}_test_reports
           path: reports
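The env swap repeated across these files replaces the hf_transfer toggle with its Xet-era counterpart: `HF_HUB_ENABLE_HF_TRANSFER=1` opted downloads into the hf_transfer backend, while `HF_XET_HIGH_PERFORMANCE=1` asks the newer Xet storage client for its high-throughput mode (that reading of the variable's semantics is an assumption, not stated in the diff). The resulting env block, values as in the hunks above:

env:
  HF_HOME: /mnt/cache
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  # Xet high-performance mode replaces the old hf_transfer toggle
  HF_XET_HIGH_PERFORMANCE: 1
  PYTEST_TIMEOUT: 600
  RUN_SLOW: no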
.github/workflows/push_tests_mps.yml (8 changes)

@@ -8,7 +8,7 @@ env:
   HF_HOME: /mnt/cache
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   PYTEST_TIMEOUT: 600
   RUN_SLOW: no

@@ -23,7 +23,7 @@ jobs:

     steps:
       - name: Checkout diffusers
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           fetch-depth: 2

@@ -57,7 +57,7 @@ jobs:
           HF_HOME: /System/Volumes/Data/mnt/cache
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
         run: |
-          ${CONDA_RUN} python -m pytest -n 0 -s -v --make-reports=tests_torch_mps tests/
+          ${CONDA_RUN} python -m pytest -n 0 --make-reports=tests_torch_mps tests/

       - name: Failure short reports
         if: ${{ failure() }}
@@ -65,7 +65,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: pr_torch_mps_test_reports
           path: reports
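The MPS job keeps running pytest through `${CONDA_RUN}` but with `-n 0`, which disables pytest-xdist worker processes entirely; a single Apple-silicon GPU gains nothing from parallel workers, and the hunk also drops the verbose `-s -v` flags. The step in isolation (step name illustrative, env values as in the diff):

      - name: Run fast PyTorch tests on MPS
        env:
          HF_HOME: /System/Volumes/Data/mnt/cache
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          # -n 0 runs tests in-process rather than in xdist workers
          ${CONDA_RUN} python -m pytest -n 0 --make-reports=tests_torch_mps tests/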
.github/workflows/pypi_publish.yaml (12 changes)

@@ -15,12 +15,12 @@ jobs:
       latest_branch: ${{ steps.set_latest_branch.outputs.latest_branch }}
     steps:
       - name: Checkout Repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6

       - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v6
         with:
-          python-version: '3.8'
+          python-version: '3.10'

       - name: Fetch latest branch
         id: fetch_latest_branch
@@ -40,14 +40,14 @@ jobs:

     steps:
       - name: Checkout Repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v6
         with:
           ref: ${{ needs.find-and-checkout-latest-branch.outputs.latest_branch }}

       - name: Setup Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v6
         with:
-          python-version: "3.8"
+          python-version: "3.10"

       - name: Install dependencies
         run: |
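One detail to keep when bumping these versions: `python-version` must remain a quoted string, because an unquoted 3.10 would be parsed as the YAML float 3.1 and resolve to the wrong interpreter. For example:

      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.10"  # quoted; an unquoted 3.10 is read as the float 3.1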
81
.github/workflows/release_tests_fast.yml
vendored
81
.github/workflows/release_tests_fast.yml
vendored
@@ -27,13 +27,12 @@ jobs:
|
|||||||
pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
|
pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 2
|
fetch-depth: 2
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -45,7 +44,7 @@ jobs:
|
|||||||
echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
|
echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
|
||||||
- name: Pipeline Tests Artifacts
|
- name: Pipeline Tests Artifacts
|
||||||
if: ${{ always() }}
|
if: ${{ always() }}
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v6
|
||||||
with:
|
with:
|
||||||
name: test-pipelines.json
|
name: test-pipelines.json
|
||||||
path: reports
|
path: reports
|
||||||
@@ -65,7 +64,7 @@ jobs:
|
|||||||
options: --shm-size "16gb" --ipc host --gpus all
|
options: --shm-size "16gb" --ipc host --gpus all
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 2
|
fetch-depth: 2
|
||||||
- name: NVIDIA-SMI
|
- name: NVIDIA-SMI
|
||||||
@@ -73,9 +72,8 @@ jobs:
|
|||||||
nvidia-smi
|
nvidia-smi
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
python utils/print_env.py
|
python utils/print_env.py
|
||||||
@@ -85,8 +83,8 @@ jobs:
|
|||||||
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v -k "not Flax and not Onnx" \
|
-k "not Flax and not Onnx" \
|
||||||
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
|
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
|
||||||
tests/pipelines/${{ matrix.module }}
|
tests/pipelines/${{ matrix.module }}
|
||||||
- name: Failure short reports
|
- name: Failure short reports
|
||||||
@@ -96,7 +94,7 @@ jobs:
|
|||||||
cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
|
cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
|
||||||
- name: Test suite reports artifacts
|
- name: Test suite reports artifacts
|
||||||
if: ${{ always() }}
|
if: ${{ always() }}
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v6
|
||||||
with:
|
with:
|
||||||
name: pipeline_${{ matrix.module }}_test_reports
|
name: pipeline_${{ matrix.module }}_test_reports
|
||||||
path: reports
|
path: reports
|
||||||
@@ -118,16 +116,15 @@ jobs:
|
|||||||
module: [models, schedulers, lora, others, single_file]
|
module: [models, schedulers, lora, others, single_file]
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 2
|
fetch-depth: 2
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip install peft@git+https://github.com/huggingface/peft.git
|
||||||
python -m uv pip install peft@git+https://github.com/huggingface/peft.git
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
@@ -139,8 +136,8 @@ jobs:
|
|||||||
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
run: |
|
run: |
|
||||||
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
||||||
-s -v -k "not Flax and not Onnx" \
|
-k "not Flax and not Onnx" \
|
||||||
--make-reports=tests_torch_${{ matrix.module }}_cuda \
|
--make-reports=tests_torch_${{ matrix.module }}_cuda \
|
||||||
tests/${{ matrix.module }}
|
tests/${{ matrix.module }}
|
||||||
|
|
||||||
@@ -152,7 +149,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Test suite reports artifacts
|
- name: Test suite reports artifacts
|
||||||
if: ${{ always() }}
|
if: ${{ always() }}
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v6
|
||||||
with:
|
with:
|
||||||
name: torch_cuda_${{ matrix.module }}_test_reports
|
name: torch_cuda_${{ matrix.module }}_test_reports
|
||||||
path: reports
|
path: reports
|
||||||
@@ -169,16 +166,15 @@ jobs:
|
|||||||
shell: bash
|
shell: bash
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout diffusers
|
- name: Checkout diffusers
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 2
|
fetch-depth: 2
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
uv pip install -e ".[quality]"
|
||||||
python -m uv pip install -e [quality,test]
|
uv pip install peft@git+https://github.com/huggingface/peft.git
|
||||||
python -m uv pip install peft@git+https://github.com/huggingface/peft.git
|
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
||||||
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
run: |
|
run: |
|
||||||
@@ -190,8 +186,8 @@ jobs:
|
|||||||
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
||||||
CUBLAS_WORKSPACE_CONFIG: :16:8
|
CUBLAS_WORKSPACE_CONFIG: :16:8
|
||||||
  run: |
- python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+ pytest -n 1 --max-worker-restart=0 --dist=loadfile \
- -s -v -k "not Flax and not Onnx" \
+ -k "not Flax and not Onnx" \
  --make-reports=tests_torch_minimum_cuda \
  tests/models/test_modeling_common.py \
  tests/pipelines/test_pipelines_common.py \
@@ -209,7 +205,7 @@ jobs:

  - name: Test suite reports artifacts
  if: ${{ always() }}
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v6
  with:
  name: torch_minimum_version_cuda_test_reports
  path: reports
@@ -226,7 +222,7 @@ jobs:

  steps:
  - name: Checkout diffusers
- uses: actions/checkout@v3
+ uses: actions/checkout@v6
  with:
  fetch-depth: 2

@@ -235,8 +231,7 @@ jobs:
  nvidia-smi
  - name: Install dependencies
  run: |
- python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
- python -m uv pip install -e [quality,test,training]
+ uv pip install -e ".[quality,training]"
  - name: Environment
  run: |
  python utils/print_env.py
@@ -245,14 +240,14 @@ jobs:
  HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
  RUN_COMPILE: yes
  run: |
- python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
+ pytest -n 1 --max-worker-restart=0 --dist=loadfile -k "compile" --make-reports=tests_torch_compile_cuda tests/
  - name: Failure short reports
  if: ${{ failure() }}
  run: cat reports/tests_torch_compile_cuda_failures_short.txt

  - name: Test suite reports artifacts
  if: ${{ always() }}
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v6
  with:
  name: torch_compile_test_reports
  path: reports
@@ -269,7 +264,7 @@ jobs:

  steps:
  - name: Checkout diffusers
- uses: actions/checkout@v3
+ uses: actions/checkout@v6
  with:
  fetch-depth: 2

@@ -278,8 +273,7 @@ jobs:
  nvidia-smi
  - name: Install dependencies
  run: |
- python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
- python -m uv pip install -e [quality,test,training]
+ uv pip install -e ".[quality,training]"
  - name: Environment
  run: |
  python utils/print_env.py
@@ -287,14 +281,14 @@ jobs:
  env:
  HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
  run: |
- python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
+ pytest -n 1 --max-worker-restart=0 --dist=loadfile -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
  - name: Failure short reports
  if: ${{ failure() }}
  run: cat reports/tests_torch_xformers_cuda_failures_short.txt

  - name: Test suite reports artifacts
  if: ${{ always() }}
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v6
  with:
  name: torch_xformers_test_reports
  path: reports
@@ -311,7 +305,7 @@ jobs:

  steps:
  - name: Checkout diffusers
- uses: actions/checkout@v3
+ uses: actions/checkout@v6
  with:
  fetch-depth: 2

@@ -321,21 +315,18 @@ jobs:

  - name: Install dependencies
  run: |
- python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
- python -m uv pip install -e [quality,test,training]
+ uv pip install -e ".[quality,training]"

  - name: Environment
  run: |
- python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
  python utils/print_env.py

  - name: Run example tests on GPU
  env:
  HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
  run: |
- python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
- python -m uv pip install timm
- python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
+ uv pip install ".[training]"
+ pytest -n 1 --max-worker-restart=0 --dist=loadfile --make-reports=examples_torch_cuda examples/

  - name: Failure short reports
  if: ${{ failure() }}
@@ -345,7 +336,7 @@ jobs:

  - name: Test suite reports artifacts
  if: ${{ always() }}
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v6
  with:
  name: examples_test_reports
  path: reports
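A pattern repeats across these workflow hunks: the venv bootstrap lines disappear (the rebuilt images ship a ready environment), `python -m pytest` becomes a plain `pytest`, and the noisy `-s -v` flags are dropped while the `-k` filters stay. For readers unfamiliar with `-k`, here is a minimal, hypothetical test file illustrating how an expression like "not Flax and not Onnx" selects tests by name; it is an illustration only, not part of the diffusers suite:

# test_selection_demo.py -- hypothetical file, only to illustrate `-k` filtering.
# Running: pytest -k "not Flax and not Onnx" test_selection_demo.py
# pytest matches the expression against each collected test's name
# (case-insensitive substring match), so only the first test runs.

def test_torch_pipeline():
    assert 1 + 1 == 2  # selected: name contains neither "Flax" nor "Onnx"

def test_pipeline_Flax():
    assert True  # deselected by `not Flax`

def test_export_Onnx():
    assert True  # deselected by `not Onnx`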
7 changes: .github/workflows/run_tests_from_a_pr.yml (vendored)
@@ -57,15 +57,14 @@ jobs:
  shell: bash -e {0}

  - name: Checkout PR branch
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  ref: refs/pull/${{ inputs.pr_number }}/head

  - name: Install pytest
  run: |
- python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
- python -m uv pip install -e [quality,test]
- python -m uv pip install peft
+ uv pip install -e ".[quality]"
+ uv pip install peft

  - name: Run tests
  env:
2 changes: .github/workflows/ssh-pr-runner.yml (vendored)
@@ -27,7 +27,7 @@ jobs:

  steps:
  - name: Checkout diffusers
- uses: actions/checkout@v3
+ uses: actions/checkout@v6
  with:
  fetch-depth: 2

2 changes: .github/workflows/ssh-runner.yml (vendored)
@@ -35,7 +35,7 @@ jobs:

  steps:
  - name: Checkout diffusers
- uses: actions/checkout@v3
+ uses: actions/checkout@v6
  with:
  fetch-depth: 2

6 changes: .github/workflows/stale.yml (vendored)
@@ -15,12 +15,12 @@ jobs:
  env:
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v6

  - name: Setup Python
- uses: actions/setup-python@v1
+ uses: actions/setup-python@v6
  with:
- python-version: 3.8
+ python-version: 3.10

  - name: Install requirements
  run: |
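One detail worth flagging in the stale.yml hunk: an unquoted python-version: 3.10 is read by YAML as the float 3.1, so the value is usually quoted ("3.10") to mean Python 3.10. A minimal sketch of the pitfall (assumes PyYAML is installed):

import yaml  # pip install pyyaml

# Unquoted, YAML parses 3.10 as a float and the trailing zero is lost.
print(yaml.safe_load("python-version: 3.10"))    # {'python-version': 3.1}

# Quoted, the value survives as the intended version string.
print(yaml.safe_load('python-version: "3.10"'))  # {'python-version': '3.10'}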
2 changes: .github/workflows/trufflehog.yml (vendored)
@@ -8,7 +8,7 @@ jobs:
  runs-on: ubuntu-22.04
  steps:
  - name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  fetch-depth: 0
  - name: Secret Scanning
2 changes: .github/workflows/typos.yml (vendored)
@@ -8,7 +8,7 @@ jobs:
  runs-on: ubuntu-22.04

  steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v6

  - name: typos-action
  uses: crate-ci/typos@v1.12.4
2 changes: .github/workflows/update_metadata.yml (vendored)
@@ -15,7 +15,7 @@ jobs:
  shell: bash -l {0}

  steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v6

  - name: Setup environment
  run: |
3 changes: .gitignore (vendored)
@@ -125,6 +125,9 @@ dmypy.json
  .vs
  .vscode

+ # Cursor
+ .cursor
+
  # Pycharm
  .idea

2 changes: LICENSE
@@ -144,7 +144,7 @@
  agreed to in writing, Licensor provides the Work (and each
  Contributor provides its Contributions) on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
+ implied, including, without limitation, Any warranties or conditions
  of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
  PARTICULAR PURPOSE. You are solely responsible for determining the
  appropriateness of using or redistributing the Work and assume any
README.md
@@ -171,7 +171,7 @@ Also, say 👋 in our public Discord channel <a href="https://discord.gg/G7tWnz9
  <tr style="border-top: 2px solid black">
  <td>Text-guided Image Inpainting</td>
  <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/inpaint">Stable Diffusion Inpainting</a></td>
- <td><a href="https://huggingface.co/runwayml/stable-diffusion-inpainting"> runwayml/stable-diffusion-inpainting </a></td>
+ <td><a href="https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting"> stable-diffusion-v1-5/stable-diffusion-inpainting </a></td>
  </tr>
  <tr style="border-top: 2px solid black">
  <td>Image Variation</td>
benchmarks/benchmarking_utils.py
@@ -6,7 +6,7 @@ import queue
  import threading
  from contextlib import nullcontext
  from dataclasses import dataclass
- from typing import Any, Callable, Dict, Optional, Union
+ from typing import Any, Callable

  import pandas as pd
  import torch
@@ -91,10 +91,10 @@ def model_init_fn(model_cls, group_offload_kwargs=None, layerwise_upcasting=Fals
  class BenchmarkScenario:
  name: str
  model_cls: ModelMixin
- model_init_kwargs: Dict[str, Any]
+ model_init_kwargs: dict[str, Any]
  model_init_fn: Callable
  get_model_input_dict: Callable
- compile_kwargs: Optional[Dict[str, Any]] = None
+ compile_kwargs: dict[str, Any] | None = None


  @require_torch_gpu
@@ -176,7 +176,7 @@ class BenchmarkMixin:
  result["fullgraph"], result["mode"] = None, None
  return result

- def run_bencmarks_and_collate(self, scenarios: Union[BenchmarkScenario, list[BenchmarkScenario]], filename: str):
+ def run_bencmarks_and_collate(self, scenarios: BenchmarkScenario | list[BenchmarkScenario], filename: str):
  if not isinstance(scenarios, list):
  scenarios = [scenarios]
  record_queue = queue.Queue()
@@ -214,10 +214,10 @@ class BenchmarkMixin:
  *,
  model_cls: ModelMixin,
  init_fn: Callable,
- init_kwargs: Dict[str, Any],
+ init_kwargs: dict[str, Any],
  get_input_fn: Callable,
- compile_kwargs: Optional[Dict[str, Any]],
+ compile_kwargs: dict[str, Any] | None = None,
- ) -> Dict[str, float]:
+ ) -> dict[str, float]:
  # setup
  self.pre_benchmark()

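These hunks migrate the benchmark helpers from typing.Dict/Optional/Union to the builtin generics (PEP 585) and `X | Y` unions (PEP 604) available since Python 3.9/3.10. A minimal sketch of the same before/after pattern; the field names here are illustrative, not the full BenchmarkScenario:

from dataclasses import dataclass
from typing import Any, Callable

@dataclass
class Scenario:
    # Modern spelling: builtin generics and `|` unions.
    name: str
    init_kwargs: dict[str, Any]                   # was: Dict[str, Any]
    init_fn: Callable
    compile_kwargs: dict[str, Any] | None = None  # was: Optional[Dict[str, Any]]

# Both spellings describe the same runtime types; the new one needs no
# Dict/Optional/Union imports, so the `typing` import shrinks to
# `Any, Callable`, exactly as in the hunk above.
s = Scenario(name="demo", init_kwargs={"dtype": "fp16"}, init_fn=dict)
print(s.compile_kwargs)  # None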
@@ -1,56 +1,45 @@
- FROM ubuntu:20.04
+ FROM python:3.10-slim
+ ENV PYTHONDONTWRITEBYTECODE=1
  LABEL maintainer="Hugging Face"
  LABEL repository="diffusers"

  ENV DEBIAN_FRONTEND=noninteractive

- RUN apt-get -y update \
- && apt-get install -y software-properties-common \
- && add-apt-repository ppa:deadsnakes/ppa
-
- RUN apt install -y bash \
+ RUN apt-get -y update && apt-get install -y bash \
  build-essential \
  git \
  git-lfs \
  curl \
  ca-certificates \
+ libglib2.0-0 \
  libsndfile1-dev \
- python3.10 \
- python3-pip \
  libgl1 \
  zip \
- wget \
+ wget
- python3.10-venv && \
- rm -rf /var/lib/apt/lists

- # make sure to use venv
- RUN python3.10 -m venv /opt/venv
- ENV PATH="/opt/venv/bin:$PATH"
+ ENV UV_PYTHON=/usr/local/bin/python

  # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
- RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
- python3.10 -m uv pip install --no-cache-dir \
+ RUN pip install uv
+ RUN uv pip install --no-cache-dir \
  torch \
  torchvision \
  torchaudio \
- invisible_watermark \
- --extra-index-url https://download.pytorch.org/whl/cpu && \
- python3.10 -m uv pip install --no-cache-dir \
+ --extra-index-url https://download.pytorch.org/whl/cpu
+ RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/diffusers.git@main#egg=diffusers[test]"

+ # Extra dependencies
+ RUN uv pip install --no-cache-dir \
  accelerate \
- datasets \
- hf-doc-builder \
- huggingface-hub \
- Jinja2 \
- librosa \
  numpy==1.26.4 \
- scipy \
- tensorboard \
- transformers \
- matplotlib \
+ hf_xet \
  setuptools==69.5.1 \
  bitsandbytes \
  torchao \
  gguf \
  optimum-quanto

+ RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean

  CMD ["/bin/bash"]
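The rebuilt slim image also sets ENV PYTHONDONTWRITEBYTECODE=1 near the top. That flag stops every Python process in the image from writing __pycache__/*.pyc files, which keeps compiled bytecode out of the final layers. A minimal sketch of how the setting surfaces at runtime:

import os, sys

# Inside an image built with ENV PYTHONDONTWRITEBYTECODE=1, every Python
# process starts with bytecode caching disabled.
print(os.environ.get("PYTHONDONTWRITEBYTECODE"))  # "1" inside the image
print(sys.flags.dont_write_bytecode)              # 1 when the env var is set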
@@ -44,6 +44,6 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
  scipy \
  tensorboard \
  transformers \
- hf_transfer
+ hf_xet

  CMD ["/bin/bash"]

@@ -38,13 +38,12 @@ RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
  datasets \
  hf-doc-builder \
  huggingface-hub \
- hf_transfer \
+ hf_xet \
  Jinja2 \
  librosa \
  numpy==1.26.4 \
  scipy \
  tensorboard \
- transformers \
- hf_transfer
+ transformers

  CMD ["/bin/bash"]
@@ -1,50 +1,38 @@
- FROM ubuntu:20.04
+ FROM python:3.10-slim
+ ENV PYTHONDONTWRITEBYTECODE=1
  LABEL maintainer="Hugging Face"
  LABEL repository="diffusers"

  ENV DEBIAN_FRONTEND=noninteractive

- RUN apt-get -y update \
- && apt-get install -y software-properties-common \
- && add-apt-repository ppa:deadsnakes/ppa
-
- RUN apt install -y bash \
+ RUN apt-get -y update && apt-get install -y bash \
  build-essential \
  git \
  git-lfs \
  curl \
  ca-certificates \
+ libglib2.0-0 \
  libsndfile1-dev \
- python3.10 \
- python3.10-dev \
- python3-pip \
- libgl1 \
- python3.10-venv && \
- rm -rf /var/lib/apt/lists
+ libgl1

- # make sure to use venv
- RUN python3.10 -m venv /opt/venv
- ENV PATH="/opt/venv/bin:$PATH"
+ ENV UV_PYTHON=/usr/local/bin/python

  # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
- RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
- python3.10 -m uv pip install --no-cache-dir \
+ RUN pip install uv
+ RUN uv pip install --no-cache-dir \
  torch \
  torchvision \
  torchaudio \
- invisible_watermark \
- --extra-index-url https://download.pytorch.org/whl/cpu && \
- python3.10 -m uv pip install --no-cache-dir \
+ --extra-index-url https://download.pytorch.org/whl/cpu
+ RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/diffusers.git@main#egg=diffusers[test]"

+ # Extra dependencies
+ RUN uv pip install --no-cache-dir \
  accelerate \
- datasets \
- hf-doc-builder \
- huggingface-hub \
- Jinja2 \
- librosa \
  numpy==1.26.4 \
- scipy \
- tensorboard \
- transformers \
- matplotlib \
- hf_transfer
+ hf_xet

+ RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean

  CMD ["/bin/bash"]
@@ -2,11 +2,13 @@ FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
  LABEL maintainer="Hugging Face"
  LABEL repository="diffusers"

+ ARG PYTHON_VERSION=3.12
  ENV DEBIAN_FRONTEND=noninteractive

  RUN apt-get -y update \
  && apt-get install -y software-properties-common \
- && add-apt-repository ppa:deadsnakes/ppa
+ && add-apt-repository ppa:deadsnakes/ppa && \
+ apt-get update

  RUN apt install -y bash \
  build-essential \
@@ -14,38 +16,34 @@ RUN apt install -y bash \
  git-lfs \
  curl \
  ca-certificates \
+ libglib2.0-0 \
  libsndfile1-dev \
  libgl1 \
- python3.10 \
- python3.10-dev \
+ python3 \
  python3-pip \
- python3.10-venv && \
- rm -rf /var/lib/apt/lists
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*

- # make sure to use venv
- RUN python3.10 -m venv /opt/venv
- ENV PATH="/opt/venv/bin:$PATH"
+ RUN curl -LsSf https://astral.sh/uv/install.sh | sh
+ ENV PATH="/root/.local/bin:$PATH"
+ ENV VIRTUAL_ENV="/opt/venv"
+ ENV UV_PYTHON_INSTALL_DIR=/opt/uv/python
+ RUN uv venv --python ${PYTHON_VERSION} --seed ${VIRTUAL_ENV}
+ ENV PATH="$VIRTUAL_ENV/bin:$PATH"

  # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
- RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
- python3.10 -m uv pip install --no-cache-dir \
+ RUN uv pip install --no-cache-dir \
  torch \
  torchvision \
- torchaudio \
+ torchaudio
- invisible_watermark && \
- python3.10 -m pip install --no-cache-dir \
+ RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/diffusers.git@main#egg=diffusers[test]"

+ # Extra dependencies
+ RUN uv pip install --no-cache-dir \
  accelerate \
- datasets \
- hf-doc-builder \
- huggingface-hub \
- hf_transfer \
- Jinja2 \
- librosa \
  numpy==1.26.4 \
- scipy \
- tensorboard \
- transformers \
  pytorch-lightning \
- hf_transfer
+ hf_xet

  CMD ["/bin/bash"]
@@ -2,6 +2,7 @@ FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
  LABEL maintainer="Hugging Face"
  LABEL repository="diffusers"

+ ARG PYTHON_VERSION=3.10
  ENV DEBIAN_FRONTEND=noninteractive
  ENV MINIMUM_SUPPORTED_TORCH_VERSION="2.1.0"
  ENV MINIMUM_SUPPORTED_TORCHVISION_VERSION="0.16.0"
@@ -9,7 +10,8 @@ ENV MINIMUM_SUPPORTED_TORCHAUDIO_VERSION="2.1.0"

  RUN apt-get -y update \
  && apt-get install -y software-properties-common \
- && add-apt-repository ppa:deadsnakes/ppa
+ && add-apt-repository ppa:deadsnakes/ppa && \
+ apt-get update

  RUN apt install -y bash \
  build-essential \
@@ -17,37 +19,34 @@ RUN apt install -y bash \
  git-lfs \
  curl \
  ca-certificates \
+ libglib2.0-0 \
  libsndfile1-dev \
  libgl1 \
- python3.10 \
- python3.10-dev \
+ python3 \
  python3-pip \
- python3.10-venv && \
- rm -rf /var/lib/apt/lists
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*

- # make sure to use venv
- RUN python3.10 -m venv /opt/venv
- ENV PATH="/opt/venv/bin:$PATH"
+ RUN curl -LsSf https://astral.sh/uv/install.sh | sh
+ ENV PATH="/root/.local/bin:$PATH"
+ ENV VIRTUAL_ENV="/opt/venv"
+ ENV UV_PYTHON_INSTALL_DIR=/opt/uv/python
+ RUN uv venv --python ${PYTHON_VERSION} --seed ${VIRTUAL_ENV}
+ ENV PATH="$VIRTUAL_ENV/bin:$PATH"

  # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
- RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
- python3.10 -m uv pip install --no-cache-dir \
+ RUN uv pip install --no-cache-dir \
  torch==$MINIMUM_SUPPORTED_TORCH_VERSION \
  torchvision==$MINIMUM_SUPPORTED_TORCHVISION_VERSION \
- torchaudio==$MINIMUM_SUPPORTED_TORCHAUDIO_VERSION \
+ torchaudio==$MINIMUM_SUPPORTED_TORCHAUDIO_VERSION
- invisible_watermark && \
- python3.10 -m pip install --no-cache-dir \
+ RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/diffusers.git@main#egg=diffusers[test]"

+ # Extra dependencies
+ RUN uv pip install --no-cache-dir \
  accelerate \
- datasets \
- hf-doc-builder \
- huggingface-hub \
- hf_transfer \
- Jinja2 \
- librosa \
  numpy==1.26.4 \
- scipy \
- tensorboard \
- transformers \
- hf_transfer
+ pytorch-lightning \
+ hf_xet

  CMD ["/bin/bash"]
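A recurring change across these Dockerfiles is replacing hf_transfer with hf_xet. Unlike hf_transfer, which had to be switched on via HF_HUB_ENABLE_HF_TRANSFER=1, recent huggingface_hub releases pick up hf_xet automatically once it is importable, so the download code in the test suites does not change. A minimal sketch; the repo id is just an example:

# Requires: pip install huggingface_hub hf_xet
from huggingface_hub import hf_hub_download

# With hf_xet installed, huggingface_hub transparently uses Xet-backed
# chunked downloads for repos stored on the Xet backend; the call itself
# is unchanged, which is why these images only swap the dependency.
path = hf_hub_download(
    repo_id="stable-diffusion-v1-5/stable-diffusion-inpainting",
    filename="model_index.json",
)
print(path)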
@@ -2,11 +2,13 @@ FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
  LABEL maintainer="Hugging Face"
  LABEL repository="diffusers"

+ ARG PYTHON_VERSION=3.12
  ENV DEBIAN_FRONTEND=noninteractive

  RUN apt-get -y update \
  && apt-get install -y software-properties-common \
- && add-apt-repository ppa:deadsnakes/ppa
+ && add-apt-repository ppa:deadsnakes/ppa && \
+ apt-get update

  RUN apt install -y bash \
  build-essential \
@@ -14,38 +16,35 @@ RUN apt install -y bash \
  git-lfs \
  curl \
  ca-certificates \
+ libglib2.0-0 \
  libsndfile1-dev \
  libgl1 \
- python3.10 \
- python3.10-dev \
+ python3 \
  python3-pip \
- python3.10-venv && \
- rm -rf /var/lib/apt/lists
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*

- # make sure to use venv
- RUN python3.10 -m venv /opt/venv
- ENV PATH="/opt/venv/bin:$PATH"
+ RUN curl -LsSf https://astral.sh/uv/install.sh | sh
+ ENV PATH="/root/.local/bin:$PATH"
+ ENV VIRTUAL_ENV="/opt/venv"
+ ENV UV_PYTHON_INSTALL_DIR=/opt/uv/python
+ RUN uv venv --python ${PYTHON_VERSION} --seed ${VIRTUAL_ENV}
+ ENV PATH="$VIRTUAL_ENV/bin:$PATH"

  # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
- RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
- python3.10 -m pip install --no-cache-dir \
+ RUN uv pip install --no-cache-dir \
  torch \
  torchvision \
- torchaudio \
+ torchaudio
- invisible_watermark && \
- python3.10 -m uv pip install --no-cache-dir \
+ RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/diffusers.git@main#egg=diffusers[test]"

+ # Extra dependencies
+ RUN uv pip install --no-cache-dir \
  accelerate \
- datasets \
- hf-doc-builder \
- huggingface-hub \
- hf_transfer \
- Jinja2 \
- librosa \
  numpy==1.26.4 \
- scipy \
- tensorboard \
- transformers \
- xformers \
- hf_transfer
+ pytorch-lightning \
+ hf_xet \
+ xformers

  CMD ["/bin/bash"]
docs/source/en/_toctree.yml
@@ -1,5 +1,4 @@
- - title: Get started
- sections:
+ - sections:
  - local: index
  title: Diffusers
  - local: installation
@@ -8,9 +7,8 @@
  title: Quickstart
  - local: stable_diffusion
  title: Basic performance
+ title: Get started
- - title: Pipelines
- isExpanded: false
+ - isExpanded: false
  sections:
  - local: using-diffusers/loading
  title: DiffusionPipeline
@@ -23,16 +21,15 @@
  - local: using-diffusers/reusing_seeds
  title: Reproducibility
  - local: using-diffusers/schedulers
- title: Load schedulers and models
+ title: Schedulers
- - local: using-diffusers/scheduler_features
- title: Scheduler features
+ - local: using-diffusers/automodel
+ title: AutoModel
  - local: using-diffusers/other-formats
- title: Model files and layouts
+ title: Model formats
  - local: using-diffusers/push_to_hub
- title: Push files to the Hub
+ title: Sharing pipelines and models
+ title: Pipelines
- - title: Adapters
- isExpanded: false
+ - isExpanded: false
  sections:
  - local: tutorials/using_peft_for_inference
  title: LoRA
@@ -46,38 +43,33 @@
  title: DreamBooth
  - local: using-diffusers/textual_inversion_inference
  title: Textual inversion
+ title: Adapters
- - title: Inference
- isExpanded: false
+ - isExpanded: false
  sections:
  - local: using-diffusers/weighted_prompts
- title: Prompt techniques
+ title: Prompting
  - local: using-diffusers/create_a_server
  title: Create a server
  - local: using-diffusers/batched_inference
  title: Batch inference
  - local: training/distributed_inference
  title: Distributed inference
- - local: using-diffusers/scheduler_features
- title: Scheduler features
+ - local: hybrid_inference/overview
+ title: Remote inference
+ title: Inference
- - local: using-diffusers/callback
- title: Pipeline callbacks
- - local: using-diffusers/image_quality
- title: Controlling image quality
-
- - title: Inference optimization
- isExpanded: false
+ - isExpanded: false
  sections:
  - local: optimization/fp16
  title: Accelerate inference
  - local: optimization/cache
  title: Caching
+ - local: optimization/attention_backends
+ title: Attention backends
  - local: optimization/memory
  title: Reduce memory usage
  - local: optimization/speed-memory-optims
  title: Compiling and offloading quantized models
- - title: Community optimizations
- sections:
+ - sections:
  - local: optimization/pruna
  title: Pruna
  - local: optimization/xformers
@@ -86,27 +78,19 @@
  title: Token merging
  - local: optimization/deepcache
  title: DeepCache
+ - local: optimization/cache_dit
+ title: CacheDiT
  - local: optimization/tgate
  title: TGATE
  - local: optimization/xdit
  title: xDiT
  - local: optimization/para_attn
  title: ParaAttention
+ - local: using-diffusers/image_quality
+ title: FreeU
+ title: Community optimizations
+ title: Inference optimization
- - title: Hybrid Inference
- isExpanded: false
- sections:
- - local: hybrid_inference/overview
- title: Overview
- - local: hybrid_inference/vae_decode
- title: VAE Decode
- - local: hybrid_inference/vae_encode
- title: VAE Encode
- - local: hybrid_inference/api_reference
- title: API Reference
-
- - title: Modular Diffusers
- isExpanded: false
+ - isExpanded: false
  sections:
  - local: modular_diffusers/overview
  title: Overview
@@ -128,9 +112,10 @@
  title: ComponentsManager
  - local: modular_diffusers/guiders
  title: Guiders
+ - local: modular_diffusers/custom_blocks
+ title: Building Custom Blocks
+ title: Modular Diffusers
- - title: Training
- isExpanded: false
+ - isExpanded: false
  sections:
  - local: training/overview
  title: Overview
@@ -140,8 +125,7 @@
  title: Adapt a model to a new task
  - local: tutorials/basic_training
  title: Train a diffusion model
- - title: Models
- sections:
+ - sections:
  - local: training/unconditional_training
  title: Unconditional image generation
  - local: training/text2image
@@ -160,8 +144,8 @@
  title: InstructPix2Pix
  - local: training/cogvideox
  title: CogVideoX
- - title: Methods
- sections:
+ title: Models
+ - sections:
  - local: training/text_inversion
  title: Textual Inversion
  - local: training/dreambooth
@@ -174,9 +158,9 @@
  title: Latent Consistency Distillation
  - local: training/ddpo
  title: Reinforcement learning training with DDPO
+ title: Methods
+ title: Training
- - title: Quantization
- isExpanded: false
+ - isExpanded: false
  sections:
  - local: quantization/overview
  title: Getting started
@@ -188,9 +172,10 @@
  title: torchao
  - local: quantization/quanto
  title: quanto
+ - local: quantization/modelopt
+ title: NVIDIA ModelOpt
+ title: Quantization
- - title: Model accelerators and hardware
- isExpanded: false
+ - isExpanded: false
  sections:
  - local: optimization/onnx
  title: ONNX
@@ -204,9 +189,8 @@
  title: Intel Gaudi
  - local: optimization/neuron
  title: AWS Neuron
+ title: Model accelerators and hardware
- - title: Specific pipeline examples
- isExpanded: false
+ - isExpanded: false
  sections:
  - local: using-diffusers/consisid
  title: ConsisID
@@ -232,12 +216,10 @@
  title: Stable Video Diffusion
  - local: using-diffusers/marigold_usage
  title: Marigold Computer Vision
+ title: Specific pipeline examples
- - title: Resources
- isExpanded: false
- sections:
- - title: Task recipes
+ - isExpanded: false
  sections:
+ - sections:
  - local: using-diffusers/unconditional_image_generation
  title: Unconditional image generation
  - local: using-diffusers/conditional_image_generation
@@ -252,6 +234,7 @@
  title: Video generation
  - local: using-diffusers/depth2img
  title: Depth-to-image
+ title: Task recipes
  - local: using-diffusers/write_own_pipeline
  title: Understanding pipelines, models and schedulers
  - local: community_projects
@@ -266,12 +249,10 @@
  title: Diffusers' Ethical Guidelines
  - local: conceptual/evaluation
  title: Evaluating Diffusion Models
+ title: Resources
- - title: API
- isExpanded: false
- sections:
- - title: Main Classes
+ - isExpanded: false
  sections:
+ - sections:
  - local: api/configuration
  title: Configuration
  - local: api/logging
@@ -280,8 +261,12 @@
  title: Outputs
  - local: api/quantization
  title: Quantization
- - title: Modular
- sections:
+ - local: hybrid_inference/api_reference
+ title: Remote inference
+ - local: api/parallel
+ title: Parallel inference
+ title: Main Classes
+ - sections:
  - local: api/modular_diffusers/pipeline
  title: Pipeline
  - local: api/modular_diffusers/pipeline_blocks
@@ -292,8 +277,8 @@
  title: Components and configs
  - local: api/modular_diffusers/guiders
  title: Guiders
- - title: Loaders
- sections:
+ title: Modular
+ - sections:
  - local: api/loaders/ip_adapter
  title: IP-Adapter
  - local: api/loaders/lora
@@ -308,14 +293,13 @@
  title: SD3Transformer2D
  - local: api/loaders/peft
  title: PEFT
- - title: Models
- sections:
+ title: Loaders
+ - sections:
  - local: api/models/overview
  title: Overview
  - local: api/models/auto_model
  title: AutoModel
- - title: ControlNets
- sections:
+ - sections:
  - local: api/models/controlnet
  title: ControlNetModel
  - local: api/models/controlnet_union
@@ -330,16 +314,20 @@
  title: SD3ControlNetModel
  - local: api/models/controlnet_sparsectrl
  title: SparseControlNetModel
- - title: Transformers
- sections:
+ title: ControlNets
+ - sections:
  - local: api/models/allegro_transformer3d
  title: AllegroTransformer3DModel
  - local: api/models/aura_flow_transformer2d
  title: AuraFlowTransformer2DModel
+ - local: api/models/transformer_bria_fibo
+ title: BriaFiboTransformer2DModel
  - local: api/models/bria_transformer
  title: BriaTransformer2DModel
  - local: api/models/chroma_transformer
  title: ChromaTransformer2DModel
+ - local: api/models/chronoedit_transformer_3d
+ title: ChronoEditTransformer3DModel
  - local: api/models/cogvideox_transformer3d
  title: CogVideoXTransformer3DModel
  - local: api/models/cogview3plus_transformer2d
@@ -354,16 +342,28 @@
  title: DiTTransformer2DModel
  - local: api/models/easyanimate_transformer3d
  title: EasyAnimateTransformer3DModel
+ - local: api/models/flux2_transformer
+ title: Flux2Transformer2DModel
  - local: api/models/flux_transformer
  title: FluxTransformer2DModel
+ - local: api/models/glm_image_transformer2d
+ title: GlmImageTransformer2DModel
  - local: api/models/hidream_image_transformer
  title: HiDreamImageTransformer2DModel
  - local: api/models/hunyuan_transformer2d
  title: HunyuanDiT2DModel
+ - local: api/models/hunyuanimage_transformer_2d
+ title: HunyuanImageTransformer2DModel
+ - local: api/models/hunyuan_video15_transformer_3d
+ title: HunyuanVideo15Transformer3DModel
  - local: api/models/hunyuan_video_transformer_3d
  title: HunyuanVideoTransformer3DModel
  - local: api/models/latte_transformer3d
  title: LatteTransformer3DModel
+ - local: api/models/longcat_image_transformer2d
+ title: LongCatImageTransformer2DModel
+ - local: api/models/ltx2_video_transformer3d
+ title: LTX2VideoTransformer3DModel
  - local: api/models/ltx_video_transformer3d
  title: LTXVideoTransformer3DModel
  - local: api/models/lumina2_transformer2d
@@ -374,6 +374,8 @@
  title: MochiTransformer3DModel
  - local: api/models/omnigen_transformer
  title: OmniGenTransformer2DModel
+ - local: api/models/ovisimage_transformer2d
+ title: OvisImageTransformer2DModel
  - local: api/models/pixart_transformer2d
  title: PixArtTransformer2DModel
  - local: api/models/prior_transformer
@@ -382,6 +384,8 @@
  title: QwenImageTransformer2DModel
  - local: api/models/sana_transformer2d
  title: SanaTransformer2DModel
+ - local: api/models/sana_video_transformer3d
+ title: SanaVideoTransformer3DModel
  - local: api/models/sd3_transformer2d
  title: SD3Transformer2DModel
  - local: api/models/skyreels_v2_transformer_3d
@@ -392,10 +396,14 @@
  title: Transformer2DModel
  - local: api/models/transformer_temporal
  title: TransformerTemporalModel
+ - local: api/models/wan_animate_transformer_3d
+ title: WanAnimateTransformer3DModel
  - local: api/models/wan_transformer_3d
  title: WanTransformer3DModel
- - title: UNets
- sections:
+ - local: api/models/z_image_transformer2d
+ title: ZImageTransformer2DModel
+ title: Transformers
+ - sections:
  - local: api/models/stable_cascade_unet
  title: StableCascadeUNet
  - local: api/models/unet
@@ -410,8 +418,8 @@
  title: UNetMotionModel
  - local: api/models/uvit2d
  title: UViT2DModel
- - title: VAEs
- sections:
+ title: UNets
+ - sections:
  - local: api/models/asymmetricautoencoderkl
  title: AsymmetricAutoencoderKL
  - local: api/models/autoencoder_dc
@@ -424,8 +432,18 @@
  title: AutoencoderKLCogVideoX
  - local: api/models/autoencoderkl_cosmos
  title: AutoencoderKLCosmos
+ - local: api/models/autoencoder_kl_hunyuanimage
+ title: AutoencoderKLHunyuanImage
+ - local: api/models/autoencoder_kl_hunyuanimage_refiner
+ title: AutoencoderKLHunyuanImageRefiner
  - local: api/models/autoencoder_kl_hunyuan_video
  title: AutoencoderKLHunyuanVideo
+ - local: api/models/autoencoder_kl_hunyuan_video15
+ title: AutoencoderKLHunyuanVideo15
+ - local: api/models/autoencoderkl_audio_ltx_2
+ title: AutoencoderKLLTX2Audio
+ - local: api/models/autoencoderkl_ltx_2
+ title: AutoencoderKLLTX2Video
  - local: api/models/autoencoderkl_ltx_video
  title: AutoencoderKLLTXVideo
  - local: api/models/autoencoderkl_magvit
@@ -444,40 +462,46 @@
  title: Tiny AutoEncoder
  - local: api/models/vq
  title: VQModel
+ title: VAEs
+ title: Models
- - title: Pipelines
- sections:
+ - sections:
  - local: api/pipelines/overview
  title: Overview
- - local: api/pipelines/allegro
- title: Allegro
+ - local: api/pipelines/auto_pipeline
+ title: AutoPipeline
+ - sections:
+ - local: api/pipelines/audioldm
+ title: AudioLDM
+ - local: api/pipelines/audioldm2
+ title: AudioLDM 2
+ - local: api/pipelines/dance_diffusion
+ title: Dance Diffusion
+ - local: api/pipelines/musicldm
+ title: MusicLDM
+ - local: api/pipelines/stable_audio
+ title: Stable Audio
+ title: Audio
+ - sections:
  - local: api/pipelines/amused
  title: aMUSEd
  - local: api/pipelines/animatediff
  title: AnimateDiff
  - local: api/pipelines/attend_and_excite
  title: Attend-and-Excite
- - local: api/pipelines/audioldm
- title: AudioLDM
- - local: api/pipelines/audioldm2
- title: AudioLDM 2
  - local: api/pipelines/aura_flow
  title: AuraFlow
- - local: api/pipelines/auto_pipeline
- title: AutoPipeline
  - local: api/pipelines/blip_diffusion
  title: BLIP-Diffusion
  - local: api/pipelines/bria_3_2
  title: Bria 3.2
+ - local: api/pipelines/bria_fibo
+ title: Bria Fibo
  - local: api/pipelines/chroma
  title: Chroma
- - local: api/pipelines/cogvideox
- title: CogVideoX
  - local: api/pipelines/cogview3
  title: CogView3
  - local: api/pipelines/cogview4
  title: CogView4
- - local: api/pipelines/consisid
- title: ConsisID
  - local: api/pipelines/consistency_models
  title: Consistency Models
  - local: api/pipelines/controlnet
@@ -500,8 +524,6 @@
  title: ControlNetUnion
  - local: api/pipelines/cosmos
  title: Cosmos
- - local: api/pipelines/dance_diffusion
- title: Dance Diffusion
  - local: api/pipelines/ddim
  title: DDIM
  - local: api/pipelines/ddpm
@@ -516,18 +538,18 @@
  title: EasyAnimate
  - local: api/pipelines/flux
  title: Flux
+ - local: api/pipelines/flux2
+ title: Flux2
  - local: api/pipelines/control_flux_inpaint
  title: FluxControlInpaint
- - local: api/pipelines/framepack
- title: Framepack
+ - local: api/pipelines/glm_image
+ title: GLM-Image
  - local: api/pipelines/hidream
  title: HiDream-I1
  - local: api/pipelines/hunyuandit
  title: Hunyuan-DiT
- - local: api/pipelines/hunyuan_video
- title: HunyuanVideo
+ - local: api/pipelines/hunyuanimage21
+ title: HunyuanImage2.1
- - local: api/pipelines/i2vgenxl
- title: I2VGen-XL
  - local: api/pipelines/pix2pix
  title: InstructPix2Pix
  - local: api/pipelines/kandinsky
@@ -536,62 +558,57 @@
  title: Kandinsky 2.2
  - local: api/pipelines/kandinsky3
  title: Kandinsky 3
+ - local: api/pipelines/kandinsky5_image
+ title: Kandinsky 5.0 Image
  - local: api/pipelines/kolors
  title: Kolors
  - local: api/pipelines/latent_consistency_models
  title: Latent Consistency Models
  - local: api/pipelines/latent_diffusion
  title: Latent Diffusion
- - local: api/pipelines/latte
- title: Latte
  - local: api/pipelines/ledits_pp
  title: LEDITS++
- - local: api/pipelines/ltx_video
- title: LTXVideo
+ - local: api/pipelines/longcat_image
+ title: LongCat-Image
  - local: api/pipelines/lumina2
  title: Lumina 2.0
  - local: api/pipelines/lumina
  title: Lumina-T2X
  - local: api/pipelines/marigold
  title: Marigold
- - local: api/pipelines/mochi
- title: Mochi
  - local: api/pipelines/panorama
  title: MultiDiffusion
- - local: api/pipelines/musicldm
- title: MusicLDM
  - local: api/pipelines/omnigen
  title: OmniGen
+ - local: api/pipelines/ovis_image
+ title: Ovis-Image
  - local: api/pipelines/pag
  title: PAG
  - local: api/pipelines/paint_by_example
  title: Paint by Example
- - local: api/pipelines/pia
- title: Personalized Image Animator (PIA)
  - local: api/pipelines/pixart
  title: PixArt-α
  - local: api/pipelines/pixart_sigma
  title: PixArt-Σ
+ - local: api/pipelines/prx
+ title: PRX
  - local: api/pipelines/qwenimage
  title: QwenImage
  - local: api/pipelines/sana
  title: Sana
  - local: api/pipelines/sana_sprint
  title: Sana Sprint
+ - local: api/pipelines/sana_video
+ title: Sana Video
  - local: api/pipelines/self_attention_guidance
  title: Self-Attention Guidance
  - local: api/pipelines/semantic_stable_diffusion
  title: Semantic Guidance
  - local: api/pipelines/shap_e
  title: Shap-E
- - local: api/pipelines/skyreels_v2
- title: SkyReels-V2
- - local: api/pipelines/stable_audio
- title: Stable Audio
  - local: api/pipelines/stable_cascade
  title: Stable Cascade
- - title: Stable Diffusion
- sections:
+ - sections:
  - local: api/pipelines/stable_diffusion/overview
  title: Overview
  - local: api/pipelines/stable_diffusion/depth2img
@@ -602,8 +619,6 @@
  title: Image variation
  - local: api/pipelines/stable_diffusion/img2img
  title: Image-to-image
- - local: api/pipelines/stable_diffusion/svd
- title: Image-to-video
  - local: api/pipelines/stable_diffusion/inpaint
  title: Inpainting
  - local: api/pipelines/stable_diffusion/k_diffusion
@@ -611,7 +626,8 @@
  - local: api/pipelines/stable_diffusion/latent_upscale
  title: Latent upscaler
  - local: api/pipelines/stable_diffusion/ldm3d_diffusion
- title: LDM3D Text-to-(RGB, Depth), Text-to-(RGB-pano, Depth-pano), LDM3D Upscaler
+ title: LDM3D Text-to-(RGB, Depth), Text-to-(RGB-pano, Depth-pano), LDM3D
+ Upscaler
  - local: api/pipelines/stable_diffusion/stable_diffusion_safe
  title: Safe Stable Diffusion
  - local: api/pipelines/stable_diffusion/sdxl_turbo
@@ -628,12 +644,9 @@
  title: T2I-Adapter
  - local: api/pipelines/stable_diffusion/text2img
  title: Text-to-image
+ title: Stable Diffusion
  - local: api/pipelines/stable_unclip
  title: Stable unCLIP
- - local: api/pipelines/text_to_video
- title: Text-to-video
- - local: api/pipelines/text_to_video_zero
- title: Text2Video-Zero
  - local: api/pipelines/unclip
  title: unCLIP
  - local: api/pipelines/unidiffuser
@@ -642,12 +655,53 @@
  title: Value-guided sampling
  - local: api/pipelines/visualcloze
  title: VisualCloze
- - local: api/pipelines/wan
- title: Wan
  - local: api/pipelines/wuerstchen
  title: Wuerstchen
- - title: Schedulers
- sections:
+ - local: api/pipelines/z_image
+ title: Z-Image
+ title: Image
+ - sections:
+ - local: api/pipelines/allegro
+ title: Allegro
+ - local: api/pipelines/chronoedit
+ title: ChronoEdit
+ - local: api/pipelines/cogvideox
+ title: CogVideoX
+ - local: api/pipelines/consisid
+ title: ConsisID
+ - local: api/pipelines/framepack
+ title: Framepack
+ - local: api/pipelines/hunyuan_video
+ title: HunyuanVideo
+ - local: api/pipelines/hunyuan_video15
+ title: HunyuanVideo1.5
+ - local: api/pipelines/i2vgenxl
+ title: I2VGen-XL
+ - local: api/pipelines/kandinsky5_video
+ title: Kandinsky 5.0 Video
+ - local: api/pipelines/latte
+ title: Latte
+ - local: api/pipelines/ltx2
+ title: LTX-2
+ - local: api/pipelines/ltx_video
+ title: LTXVideo
+ - local: api/pipelines/mochi
+ title: Mochi
+ - local: api/pipelines/pia
+ title: Personalized Image Animator (PIA)
+ - local: api/pipelines/skyreels_v2
+ title: SkyReels-V2
+ - local: api/pipelines/stable_diffusion/svd
+ title: Stable Video Diffusion
+ - local: api/pipelines/text_to_video
+ title: Text-to-video
+ - local: api/pipelines/text_to_video_zero
+ title: Text2Video-Zero
+ - local: api/pipelines/wan
+ title: Wan
+ title: Video
+ title: Pipelines
+ - sections:
  - local: api/schedulers/overview
  title: Overview
  - local: api/schedulers/cm_stochastic_iterative
@@ -716,8 +770,8 @@
  title: UniPCMultistepScheduler
  - local: api/schedulers/vq_diffusion
  title: VQDiffusionScheduler
+ title: Schedulers
- - title: Internal classes
- sections:
+ - sections:
  - local: api/internal_classes_overview
  title: Overview
  - local: api/attnprocessor
@@ -734,3 +788,5 @@
  title: VAE Image Processor
  - local: api/video_processor
  title: Video Processor
+ title: Internal classes
+ title: API
|
||||||
|
|||||||
@@ -29,8 +29,14 @@ Cache methods speed up diffusion transformers by storing and reusing intermediate outputs

 [[autodoc]] apply_faster_cache

-### FirstBlockCacheConfig
+## FirstBlockCacheConfig

 [[autodoc]] FirstBlockCacheConfig

 [[autodoc]] apply_first_block_cache

+### TaylorSeerCacheConfig
+
+[[autodoc]] TaylorSeerCacheConfig
+
+[[autodoc]] apply_taylorseer_cache
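For orientation, here is a minimal sketch of turning one of these configs on via the model-level `enable_cache` helper (the checkpoint id, prompt, and `threshold` value are illustrative assumptions, not taken from this diff):

```python
import torch
from diffusers import DiffusionPipeline, FirstBlockCacheConfig

pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

# Skip recomputing later transformer blocks whenever the first block's output
# changes less than `threshold` between denoising steps.
pipe.transformer.enable_cache(FirstBlockCacheConfig(threshold=0.2))

image = pipe("a photo of a cat", num_inference_steps=28).images[0]
```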
@@ -14,11 +14,8 @@ specific language governing permissions and limitations under the License.

 Schedulers from [`~schedulers.scheduling_utils.SchedulerMixin`] and models from [`ModelMixin`] inherit from [`ConfigMixin`] which stores all the parameters that are passed to their respective `__init__` methods in a JSON-configuration file.

-<Tip>
-
-To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf auth login`.
-
-</Tip>
+> [!TIP]
+> To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf auth login`.

 ## ConfigMixin
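As a quick illustration of what `ConfigMixin` records, a short sketch (the checkpoint id is just an example):

```python
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet"
)

# every __init__ argument was serialized to config.json and is exposed here
print(unet.config.sample_size, unet.config.in_channels)
```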
@@ -20,6 +20,12 @@ All pipelines with [`VaeImageProcessor`] accept PIL Image, PyTorch tensor, or NumPy arrays as image inputs

 [[autodoc]] image_processor.VaeImageProcessor

+## InpaintProcessor
+
+The [`InpaintProcessor`] accepts `mask` and `image` inputs and processes them together. Optionally, it can accept `padding_mask_crop` and apply a mask overlay.
+
+[[autodoc]] image_processor.InpaintProcessor
+
 ## VaeImageProcessorLDM3D

 The [`VaeImageProcessorLDM3D`] accepts RGB and depth inputs and returns RGB and depth outputs.
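A brief sketch of the round trip these processors perform (the blank test image is a stand-in for real data):

```python
from PIL import Image
from diffusers.image_processor import VaeImageProcessor

processor = VaeImageProcessor()

# PIL -> normalized torch.Tensor of shape (1, 3, H, W) in [-1, 1]
tensor = processor.preprocess(Image.new("RGB", (512, 512)))

# tensor -> list of PIL images (denormalized back to display range)
images = processor.postprocess(tensor, output_type="pil")
```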
@@ -14,11 +14,8 @@ specific language governing permissions and limitations under the License.

 [IP-Adapter](https://hf.co/papers/2308.06721) is a lightweight adapter that enables prompting a diffusion model with an image. This method decouples the cross-attention layers of the image and text features. The image features are generated from an image encoder.

-<Tip>
-
-Learn how to load an IP-Adapter checkpoint and image in the IP-Adapter [loading](../../using-diffusers/loading_adapters#ip-adapter) guide, and you can see how to use it in the [usage](../../using-diffusers/ip_adapter) guide.
-
-</Tip>
+> [!TIP]
+> Learn how to load and use an IP-Adapter checkpoint and image in the [IP-Adapter](../../using-diffusers/ip_adapter) guide.

 ## IPAdapterMixin
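A minimal sketch of the `IPAdapterMixin` workflow, assuming the commonly used `h94/IP-Adapter` checkpoint:

```python
import torch
from diffusers import AutoPipelineForText2Image

pipe = AutoPipelineForText2Image.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# IPAdapterMixin: load the adapter, then control how strongly it steers generation
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipe.set_ip_adapter_scale(0.6)
```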
@@ -30,14 +30,14 @@ LoRA is a fast and lightweight training method that inserts and trains a significantly smaller number of parameters

 - [`CogView4LoraLoaderMixin`] provides similar functions for [CogView4](https://huggingface.co/docs/diffusers/main/en/api/pipelines/cogview4).
 - [`AmusedLoraLoaderMixin`] is for the [`AmusedPipeline`].
 - [`HiDreamImageLoraLoaderMixin`] provides similar functions for [HiDream Image](https://huggingface.co/docs/diffusers/main/en/api/pipelines/hidream).
-- [`QwenImageLoraLoaderMixin`] provides similar functions for [Qwen Image](https://huggingface.co/docs/diffusers/main/en/api/pipelines/qwen)
+- [`QwenImageLoraLoaderMixin`] provides similar functions for [Qwen Image](https://huggingface.co/docs/diffusers/main/en/api/pipelines/qwen).
+- [`ZImageLoraLoaderMixin`] provides similar functions for [Z-Image](https://huggingface.co/docs/diffusers/main/en/api/pipelines/zimage).
+- [`Flux2LoraLoaderMixin`] provides similar functions for [Flux2](https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux2).
+- [`LTX2LoraLoaderMixin`] provides similar functions for [LTX-2](https://huggingface.co/docs/diffusers/main/en/api/pipelines/ltx2).
 - [`LoraBaseMixin`] provides a base class with several utility methods to fuse, unfuse, and unload LoRAs, and more.

-<Tip>
-
-To learn more about how to load LoRA weights, see the [LoRA](../../using-diffusers/loading_adapters#lora) loading guide.
-
-</Tip>
+> [!TIP]
+> To learn more about how to load LoRA weights, see the [LoRA](../../tutorials/using_peft_for_inference) loading guide.

 ## LoraBaseMixin

@@ -59,6 +59,14 @@

 [[autodoc]] loaders.lora_pipeline.FluxLoraLoaderMixin

+## Flux2LoraLoaderMixin
+
+[[autodoc]] loaders.lora_pipeline.Flux2LoraLoaderMixin
+
+## LTX2LoraLoaderMixin
+
+[[autodoc]] loaders.lora_pipeline.LTX2LoraLoaderMixin
+
 ## CogVideoXLoraLoaderMixin

 [[autodoc]] loaders.lora_pipeline.CogVideoXLoraLoaderMixin

@@ -110,6 +118,13 @@

 [[autodoc]] loaders.lora_pipeline.QwenImageLoraLoaderMixin

+## ZImageLoraLoaderMixin
+
+[[autodoc]] loaders.lora_pipeline.ZImageLoraLoaderMixin
+
+## KandinskyLoraLoaderMixin
+
+[[autodoc]] loaders.lora_pipeline.KandinskyLoraLoaderMixin

 ## LoraBaseMixin

 [[autodoc]] loaders.lora_base.LoraBaseMixin
|
|||||||
|
|
||||||
# PEFT
|
# PEFT
|
||||||
|
|
||||||
Diffusers supports loading adapters such as [LoRA](../../using-diffusers/loading_adapters) with the [PEFT](https://huggingface.co/docs/peft/index) library with the [`~loaders.peft.PeftAdapterMixin`] class. This allows modeling classes in Diffusers like [`UNet2DConditionModel`], [`SD3Transformer2DModel`] to operate with an adapter.
|
Diffusers supports loading adapters such as [LoRA](../../tutorials/using_peft_for_inference) with the [PEFT](https://huggingface.co/docs/peft/index) library with the [`~loaders.peft.PeftAdapterMixin`] class. This allows modeling classes in Diffusers like [`UNet2DConditionModel`], [`SD3Transformer2DModel`] to operate with an adapter.
|
||||||
|
|
||||||
<Tip>
|
> [!TIP]
|
||||||
|
> Refer to the [Inference with PEFT](../../tutorials/using_peft_for_inference.md) tutorial for an overview of how to use PEFT in Diffusers for inference.
|
||||||
Refer to the [Inference with PEFT](../../tutorials/using_peft_for_inference.md) tutorial for an overview of how to use PEFT in Diffusers for inference.
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
## PeftAdapterMixin
|
## PeftAdapterMixin
|
||||||
|
|
||||||
|
|||||||
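A short sketch of the adapter controls `PeftAdapterMixin` exposes directly on a model (the repo and adapter name are illustrative):

```python
import torch
from diffusers import AutoPipelineForText2Image

pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights(
    "ostris/super-cereal-sdxl-lora",
    weight_name="cereal_box_sdxl_v1.safetensors",
    adapter_name="cereal",
)

# PeftAdapterMixin methods on the underlying model
pipe.unet.set_adapters(["cereal"], weights=[0.8])
pipe.unet.disable_lora()
pipe.unet.enable_lora()
```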
@@ -16,11 +16,8 @@ Textual Inversion is a training method for personalizing models by learning new text embeddings

 [`TextualInversionLoaderMixin`] provides a function for loading Textual Inversion embeddings from Diffusers and Automatic1111 into the text encoder and loading a special token to activate the embeddings.

-<Tip>
-
-To learn more about how to load Textual Inversion embeddings, see the [Textual Inversion](../../using-diffusers/loading_adapters#textual-inversion) loading guide.
-
-</Tip>
+> [!TIP]
+> To learn more about how to load Textual Inversion embeddings, see the [Textual Inversion](../../using-diffusers/textual_inversion_inference) loading guide.

 ## TextualInversionLoaderMixin
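A minimal sketch, assuming the public `sd-concepts-library/cat-toy` embedding:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# loads the embedding and registers its activation token, <cat-toy>
pipe.load_textual_inversion("sd-concepts-library/cat-toy")
image = pipe("a <cat-toy> on a beach").images[0]
```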
@@ -16,11 +16,8 @@ This class is useful when *only* loading weights into a [`SD3Transformer2DModel`]

 The [`SD3Transformer2DLoadersMixin`] class currently only loads IP-Adapter weights, but will be used in the future to save weights and load LoRAs.

-<Tip>
-
-To learn more about how to load LoRA weights, see the [LoRA](../../using-diffusers/loading_adapters#lora) loading guide.
-
-</Tip>
+> [!TIP]
+> To learn more about how to load LoRA weights, see the [LoRA](../../tutorials/using_peft_for_inference) loading guide.

 ## SD3Transformer2DLoadersMixin
@@ -16,11 +16,8 @@ Some training methods - like LoRA and Custom Diffusion - typically target the UNet

 The [`UNet2DConditionLoadersMixin`] class provides functions for loading and saving weights, fusing and unfusing LoRAs, disabling and enabling LoRAs, and setting and deleting adapters.

-<Tip>
-
-To learn more about how to load LoRA weights, see the [LoRA](../../using-diffusers/loading_adapters#lora) loading guide.
-
-</Tip>
+> [!TIP]
+> To learn more about how to load LoRA weights, see the [LoRA](../../tutorials/using_peft_for_inference) guide.

 ## UNet2DConditionLoadersMixin
@@ -39,7 +39,7 @@ mask_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images

 original_image = load_image(img_url).resize((512, 512))
 mask_image = load_image(mask_url).resize((512, 512))

-pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
+pipe = StableDiffusionInpaintPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-inpainting")
 pipe.vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
 pipe.to("cuda")
@@ -12,15 +12,7 @@ specific language governing permissions and limitations under the License.

 # AutoModel

-The `AutoModel` is designed to make it easy to load a checkpoint without needing to know the specific model class. `AutoModel` automatically retrieves the correct model class from the checkpoint `config.json` file.
-
-```python
-from diffusers import AutoModel, AutoPipelineForText2Image
-
-unet = AutoModel.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet")
-pipe = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", unet=unet)
-```
+[`AutoModel`] automatically retrieves the correct model class from the checkpoint `config.json` file.

 ## AutoModel
docs/source/en/api/models/autoencoder_kl_hunyuan_video15.md (new file, 36 lines)

<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# AutoencoderKLHunyuanVideo15

The 3D variational autoencoder (VAE) model with KL loss used in [HunyuanVideo1.5](https://github.com/Tencent/HunyuanVideo1-1.5) by Tencent.

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import AutoencoderKLHunyuanVideo15

vae = AutoencoderKLHunyuanVideo15.from_pretrained("hunyuanvideo-community/HunyuanVideo-1.5-Diffusers-480p_t2v", subfolder="vae", torch_dtype=torch.float32)

# make sure to enable tiling to avoid OOM
vae.enable_tiling()
```

## AutoencoderKLHunyuanVideo15

[[autodoc]] AutoencoderKLHunyuanVideo15
  - decode
  - encode
  - all

## DecoderOutput

[[autodoc]] models.autoencoders.vae.DecoderOutput
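As a rough sketch of the encode/decode round trip (continuing from the `vae` above; the `(batch, channels, frames, height, width)` layout and the frame count are assumptions about this model's expected input):

```python
import torch

# assumption: the 3D VAE takes video tensors shaped (B, C, F, H, W) and follows
# the standard diffusers encode/decode interface
video = torch.randn(1, 3, 9, 256, 256, dtype=torch.float32)

with torch.no_grad():
    latents = vae.encode(video).latent_dist.sample()
    reconstruction = vae.decode(latents).sample
```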
docs/source/en/api/models/autoencoder_kl_hunyuanimage.md (new file, 32 lines)

<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# AutoencoderKLHunyuanImage

The 2D variational autoencoder (VAE) model with KL loss used in [HunyuanImage2.1](https://github.com/Tencent-Hunyuan/HunyuanImage-2.1).

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import AutoencoderKLHunyuanImage

vae = AutoencoderKLHunyuanImage.from_pretrained("hunyuanvideo-community/HunyuanImage-2.1-Diffusers", subfolder="vae", torch_dtype=torch.bfloat16)
```

## AutoencoderKLHunyuanImage

[[autodoc]] AutoencoderKLHunyuanImage
  - decode
  - all

## DecoderOutput

[[autodoc]] models.autoencoders.vae.DecoderOutput
(new file, 32 lines)

<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# AutoencoderKLHunyuanImageRefiner

The 3D variational autoencoder (VAE) model with KL loss used in [HunyuanImage2.1](https://github.com/Tencent-Hunyuan/HunyuanImage-2.1) for its refiner pipeline.

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import AutoencoderKLHunyuanImageRefiner

vae = AutoencoderKLHunyuanImageRefiner.from_pretrained("hunyuanvideo-community/HunyuanImage-2.1-Refiner-Diffusers", subfolder="vae", torch_dtype=torch.bfloat16)
```

## AutoencoderKLHunyuanImageRefiner

[[autodoc]] AutoencoderKLHunyuanImageRefiner
  - decode
  - all

## DecoderOutput

[[autodoc]] models.autoencoders.vae.DecoderOutput
docs/source/en/api/models/autoencoderkl_audio_ltx_2.md (new file, 29 lines)

<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# AutoencoderKLLTX2Audio

The 3D variational autoencoder (VAE) model with KL loss introduced by Lightricks in [LTX-2](https://huggingface.co/Lightricks/LTX-2). It encodes and decodes audio latent representations.

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import AutoencoderKLLTX2Audio

vae = AutoencoderKLLTX2Audio.from_pretrained("Lightricks/LTX-2", subfolder="vae", torch_dtype=torch.float32).to("cuda")
```

## AutoencoderKLLTX2Audio

[[autodoc]] AutoencoderKLLTX2Audio
  - encode
  - decode
  - all
docs/source/en/api/models/autoencoderkl_ltx_2.md (new file, 29 lines)

<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# AutoencoderKLLTX2Video

The 3D variational autoencoder (VAE) model with KL loss introduced by Lightricks in [LTX-2](https://huggingface.co/Lightricks/LTX-2).

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import AutoencoderKLLTX2Video

vae = AutoencoderKLLTX2Video.from_pretrained("Lightricks/LTX-2", subfolder="vae", torch_dtype=torch.float32).to("cuda")
```

## AutoencoderKLLTX2Video

[[autodoc]] AutoencoderKLLTX2Video
  - decode
  - encode
  - all
@@ -12,7 +12,7 @@ specific language governing permissions and limitations under the License.

 # ChromaTransformer2DModel

-A modified Flux Transformer model from [Chroma](https://huggingface.co/lodestones/Chroma).
+A modified Flux Transformer model from [Chroma](https://huggingface.co/lodestones/Chroma1-HD).

 ## ChromaTransformer2DModel
docs/source/en/api/models/chronoedit_transformer_3d.md (new file, 32 lines)

<!-- Copyright 2025 The ChronoEdit Team and HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# ChronoEditTransformer3DModel

A Diffusion Transformer model for 3D video-like data from [ChronoEdit: Towards Temporal Reasoning for Image Editing and World Simulation](https://huggingface.co/papers/2510.04290) from NVIDIA and the University of Toronto, by Jay Zhangjie Wu, Xuanchi Ren, Tianchang Shen, Tianshi Cao, Kai He, Yifan Lu, Ruiyuan Gao, Enze Xie, Shiyi Lan, Jose M. Alvarez, Jun Gao, Sanja Fidler, Zian Wang, Huan Ling.

> **TL;DR:** ChronoEdit reframes image editing as a video generation task, using input and edited images as start/end frames to leverage pretrained video models with temporal consistency. A temporal reasoning stage introduces reasoning tokens to ensure physically plausible edits and visualize the editing trajectory.

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import ChronoEditTransformer3DModel

transformer = ChronoEditTransformer3DModel.from_pretrained("nvidia/ChronoEdit-14B-Diffusers", subfolder="transformer", torch_dtype=torch.bfloat16)
```

## ChronoEditTransformer3DModel

[[autodoc]] ChronoEditTransformer3DModel

## Transformer2DModelOutput

[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
@@ -16,11 +16,8 @@ Consistency decoder can be used to decode the latents from the denoising UNet in the [`StableDiffusionPipeline`]

 The original codebase can be found at [openai/consistencydecoder](https://github.com/openai/consistencydecoder).

-<Tip warning={true}>
-
-Inference is only supported for 2 iterations as of now.
-
-</Tip>
+> [!WARNING]
+> Inference is only supported for 2 iterations as of now.

 The pipeline could not have been contributed without the help of [madebyollin](https://github.com/madebyollin) and [mrsteyk](https://github.com/mrsteyk) from [this issue](https://github.com/openai/consistencydecoder/issues/1).
@@ -33,6 +33,21 @@ url = "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/m

 pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=controlnet)
 ```

+## Loading from Control LoRA
+
+Control-LoRA was introduced by Stability AI in [stabilityai/control-lora](https://huggingface.co/stabilityai/control-lora). It adds low-rank parameter-efficient fine-tuning to ControlNet, offering a more efficient and compact way to bring model control to a wider variety of consumer GPUs.
+
+```py
+import torch
+from diffusers import ControlNetModel, UNet2DConditionModel
+
+lora_id = "stabilityai/control-lora"
+lora_filename = "control-LoRAs-rank128/control-lora-canny-rank128.safetensors"
+
+unet = UNet2DConditionModel.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet", torch_dtype=torch.bfloat16).to("cuda")
+controlnet = ControlNetModel.from_unet(unet).to(device="cuda", dtype=torch.bfloat16)
+controlnet.load_lora_adapter(lora_id, weight_name=lora_filename, prefix=None, controlnet_config=controlnet.config)
+```
+
 ## ControlNetModel

 [[autodoc]] ControlNetModel
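A plausible next step, continuing from the `unet` and `controlnet` above, is to hand both to an SDXL ControlNet pipeline; the wiring below is a sketch, not part of the snippet in the diff:

```python
import torch
from diffusers import StableDiffusionXLControlNetPipeline

pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    unet=unet,          # the UNet the Control-LoRA was derived from
    controlnet=controlnet,
    torch_dtype=torch.bfloat16,
).to("cuda")
```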
@@ -42,4 +42,4 @@ pipe = FluxControlNetPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", co

 ## FluxControlNetOutput

-[[autodoc]] models.controlnet_flux.FluxControlNetOutput
+[[autodoc]] models.controlnets.controlnet_flux.FluxControlNetOutput
@@ -43,4 +43,4 @@ controlnet = SparseControlNetModel.from_pretrained("guoyww/animatediff-sparsectr

 ## SparseControlNetOutput

-[[autodoc]] models.controlnet_sparsectrl.SparseControlNetOutput
+[[autodoc]] models.controlnets.controlnet_sparsectrl.SparseControlNetOutput
docs/source/en/api/models/flux2_transformer.md (new file, 19 lines)

<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Flux2Transformer2DModel

A Transformer model for image-like data from [Flux2](https://hf.co/black-forest-labs/FLUX.2-dev).

## Flux2Transformer2DModel

[[autodoc]] Flux2Transformer2DModel
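This page ships no loading snippet; by analogy with the sibling transformer docs, a sketch (the `transformer` subfolder is an assumption about the checkpoint layout):

```python
import torch
from diffusers import Flux2Transformer2DModel

# assumption: the checkpoint uses the usual layout with a "transformer" subfolder
transformer = Flux2Transformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.2-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)
```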
docs/source/en/api/models/glm_image_transformer2d.md (new file, 18 lines)

<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# GlmImageTransformer2DModel

A Diffusion Transformer model for 2D data from [GlmImageTransformer2DModel] (TODO).

## GlmImageTransformer2DModel

[[autodoc]] GlmImageTransformer2DModel
docs/source/en/api/models/hunyuan_video15_transformer_3d.md (new file, 30 lines)

<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# HunyuanVideo15Transformer3DModel

A Diffusion Transformer model for 3D video-like data used in [HunyuanVideo1.5](https://github.com/Tencent/HunyuanVideo1-1.5).

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import HunyuanVideo15Transformer3DModel

transformer = HunyuanVideo15Transformer3DModel.from_pretrained("hunyuanvideo-community/HunyuanVideo-1.5-Diffusers-480p_t2v", subfolder="transformer", torch_dtype=torch.bfloat16)
```

## HunyuanVideo15Transformer3DModel

[[autodoc]] HunyuanVideo15Transformer3DModel

## Transformer2DModelOutput

[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
docs/source/en/api/models/hunyuanimage_transformer_2d.md (new file, 30 lines)

<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# HunyuanImageTransformer2DModel

A Diffusion Transformer model for [HunyuanImage2.1](https://github.com/Tencent-Hunyuan/HunyuanImage-2.1).

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import HunyuanImageTransformer2DModel

transformer = HunyuanImageTransformer2DModel.from_pretrained("hunyuanvideo-community/HunyuanImage-2.1-Diffusers", subfolder="transformer", torch_dtype=torch.bfloat16)
```

## HunyuanImageTransformer2DModel

[[autodoc]] HunyuanImageTransformer2DModel

## Transformer2DModelOutput

[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
docs/source/en/api/models/longcat_image_transformer2d.md (new file, 25 lines)

<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# LongCatImageTransformer2DModel

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import LongCatImageTransformer2DModel

transformer = LongCatImageTransformer2DModel.from_pretrained("meituan-longcat/LongCat-Image", subfolder="transformer", torch_dtype=torch.bfloat16)
```

## LongCatImageTransformer2DModel

[[autodoc]] LongCatImageTransformer2DModel
docs/source/en/api/models/ltx2_video_transformer3d.md (new file, 26 lines)

<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# LTX2VideoTransformer3DModel

A Diffusion Transformer model for 3D data, introduced by Lightricks in [LTX-2](https://huggingface.co/Lightricks/LTX-2).

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import LTX2VideoTransformer3DModel

transformer = LTX2VideoTransformer3DModel.from_pretrained("Lightricks/LTX-2", subfolder="transformer", torch_dtype=torch.bfloat16).to("cuda")
```

## LTX2VideoTransformer3DModel

[[autodoc]] LTX2VideoTransformer3DModel
docs/source/en/api/models/ovisimage_transformer2d.md (new file, 24 lines)

<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# OvisImageTransformer2DModel

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import OvisImageTransformer2DModel

transformer = OvisImageTransformer2DModel.from_pretrained("AIDC-AI/Ovis-Image-7B", subfolder="transformer", torch_dtype=torch.bfloat16)
```

## OvisImageTransformer2DModel

[[autodoc]] OvisImageTransformer2DModel
docs/source/en/api/models/sana_video_transformer3d.md (new file, 36 lines)

<!-- Copyright 2025 The SANA-Video Authors and HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# SanaVideoTransformer3DModel

A Diffusion Transformer model for 3D data (video) from [SANA-Video: Efficient Video Generation with Block Linear Diffusion Transformer](https://huggingface.co/papers/2509.24695) from NVIDIA and MIT HAN Lab, by Junsong Chen, Yuyang Zhao, Jincheng Yu, Ruihang Chu, Junyu Chen, Shuai Yang, Xianbang Wang, Yicheng Pan, Daquan Zhou, Huan Ling, Haozhe Liu, Hongwei Yi, Hao Zhang, Muyang Li, Yukang Chen, Han Cai, Sanja Fidler, Ping Luo, Song Han, Enze Xie.

The abstract from the paper is:

*We introduce SANA-Video, a small diffusion model that can efficiently generate videos up to 720x1280 resolution and minute-length duration. SANA-Video synthesizes high-resolution, high-quality and long videos with strong text-video alignment at a remarkably fast speed, deployable on RTX 5090 GPU. Two core designs ensure our efficient, effective and long video generation: (1) Linear DiT: We leverage linear attention as the core operation, which is more efficient than vanilla attention given the large number of tokens processed in video generation. (2) Constant-Memory KV cache for Block Linear Attention: we design block-wise autoregressive approach for long video generation by employing a constant-memory state, derived from the cumulative properties of linear attention. This KV cache provides the Linear DiT with global context at a fixed memory cost, eliminating the need for a traditional KV cache and enabling efficient, minute-long video generation. In addition, we explore effective data filters and model training strategies, narrowing the training cost to 12 days on 64 H100 GPUs, which is only 1% of the cost of MovieGen. Given its low cost, SANA-Video achieves competitive performance compared to modern state-of-the-art small diffusion models (e.g., Wan 2.1-1.3B and SkyReel-V2-1.3B) while being 16x faster in measured latency. Moreover, SANA-Video can be deployed on RTX 5090 GPUs with NVFP4 precision, accelerating the inference speed of generating a 5-second 720p video from 71s to 29s (2.4x speedup). In summary, SANA-Video enables low-cost, high-quality video generation.*

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import SanaVideoTransformer3DModel

transformer = SanaVideoTransformer3DModel.from_pretrained("Efficient-Large-Model/SANA-Video_2B_480p_diffusers", subfolder="transformer", torch_dtype=torch.bfloat16)
```

## SanaVideoTransformer3DModel

[[autodoc]] SanaVideoTransformer3DModel

## Transformer2DModelOutput

[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
@@ -22,11 +22,8 @@ When the input is **continuous**:

 When the input is **discrete**:

-<Tip>
-
-It is assumed one of the input classes is the masked latent pixel. The predicted classes of the unnoised image don't contain a prediction for the masked pixel because the unnoised image cannot be masked.
-
-</Tip>
+> [!TIP]
+> It is assumed one of the input classes is the masked latent pixel. The predicted classes of the unnoised image don't contain a prediction for the masked pixel because the unnoised image cannot be masked.

 1. Convert input (classes of latent pixels) to embeddings and apply positional embeddings.
 2. Apply the Transformer blocks in the standard way.
docs/source/en/api/models/transformer_bria_fibo.md (new file, 19 lines)

<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# BriaFiboTransformer2DModel

A modified Flux Transformer model from [Bria](https://huggingface.co/briaai/FIBO).

## BriaFiboTransformer2DModel

[[autodoc]] BriaFiboTransformer2DModel
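No loading snippet is given here either; a sketch by analogy with the other transformer pages (the `transformer` subfolder and dtype are assumptions about the checkpoint layout):

```python
import torch
from diffusers import BriaFiboTransformer2DModel

# assumption: standard diffusers layout with a "transformer" subfolder
transformer = BriaFiboTransformer2DModel.from_pretrained(
    "briaai/FIBO", subfolder="transformer", torch_dtype=torch.bfloat16
)
```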
docs/source/en/api/models/wan_animate_transformer_3d.md (new file, 30 lines)

<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# WanAnimateTransformer3DModel

A Diffusion Transformer model for 3D video-like data, introduced in [Wan Animate](https://github.com/Wan-Video/Wan2.2) by the Alibaba Wan Team.

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import WanAnimateTransformer3DModel

transformer = WanAnimateTransformer3DModel.from_pretrained("Wan-AI/Wan2.2-Animate-14B-Diffusers", subfolder="transformer", torch_dtype=torch.bfloat16)
```

## WanAnimateTransformer3DModel

[[autodoc]] WanAnimateTransformer3DModel

## Transformer2DModelOutput

[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
docs/source/en/api/models/z_image_transformer2d.md (new file, 19 lines)

<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ZImageTransformer2DModel

A Transformer model for image-like data from [Z-Image](https://huggingface.co/Tongyi-MAI/Z-Image-Turbo).

## ZImageTransformer2DModel

[[autodoc]] ZImageTransformer2DModel
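As with the other terse transformer pages, a loading sketch (the `transformer` subfolder is an assumption about the checkpoint layout):

```python
import torch
from diffusers import ZImageTransformer2DModel

# assumption: standard diffusers layout with a "transformer" subfolder
transformer = ZImageTransformer2DModel.from_pretrained(
    "Tongyi-MAI/Z-Image-Turbo", subfolder="transformer", torch_dtype=torch.bfloat16
)
```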
@@ -39,11 +39,8 @@ For instance, retrieving an image by indexing into it returns the tuple `(output

 outputs[:1]
 ```

-<Tip>
-
-To check a specific pipeline or model output, refer to its corresponding API documentation.
-
-</Tip>
+> [!TIP]
+> To check a specific pipeline or model output, refer to its corresponding API documentation.

 ## BaseOutput
docs/source/en/api/parallel.md (new file, 24 lines)

<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# Parallelism

Parallelism strategies help speed up diffusion transformers by distributing computations across multiple devices, allowing for faster inference/training times. Refer to the [Distributed inference](../training/distributed_inference) guide to learn more.

## ParallelConfig

[[autodoc]] ParallelConfig

## ContextParallelConfig

[[autodoc]] ContextParallelConfig

[[autodoc]] hooks.apply_context_parallel
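A rough sketch of constructing these configs (the `ring_degree` value and the `context_parallel_config` field name are assumptions, not verified against a release):

```python
from diffusers import ContextParallelConfig, ParallelConfig

# assumption: ring_degree sets how many devices cooperate on ring attention,
# and ParallelConfig accepts the context-parallel settings under this field name
cp_config = ContextParallelConfig(ring_degree=2)
config = ParallelConfig(context_parallel_config=cp_config)
```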
@@ -17,11 +17,8 @@ The abstract from the paper is:

 *Significant advancements have been made in the field of video generation, with the open-source community contributing a wealth of research papers and tools for training high-quality models. However, despite these efforts, the available information and resources remain insufficient for achieving commercial-level performance. In this report, we open the black box and introduce Allegro, an advanced video generation model that excels in both quality and temporal consistency. We also highlight the current limitations in the field and present a comprehensive methodology for training high-performance, commercial-level video generation models, addressing key aspects such as data, model architecture, training pipeline, and evaluation. Our user study shows that Allegro surpasses existing open-source models and most commercial models, ranking just behind Hailuo and Kling. Code: https://github.com/rhymes-ai/Allegro , Model: https://huggingface.co/rhymes-ai/Allegro , Gallery: https://rhymes.ai/allegro_gallery .*

-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

 ## Quantization
@@ -102,11 +102,8 @@ Here are some sample outputs:

 </tr>
 </table>

-<Tip>
-
-AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting `clip_sample=False` in the scheduler as this can also have an adverse effect on generated samples. Additionally, the AnimateDiff checkpoints can be sensitive to the beta schedule of the scheduler. We recommend setting this to `linear`.
-
-</Tip>
+> [!TIP]
+> AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting `clip_sample=False` in the scheduler as this can also have an adverse effect on generated samples. Additionally, the AnimateDiff checkpoints can be sensitive to the beta schedule of the scheduler. We recommend setting this to `linear`.

 ### AnimateDiffControlNetPipeline
@@ -799,17 +796,11 @@ frames = output.frames[0]

 export_to_gif(frames, "animation.gif")
 ```

-<Tip warning={true}>
-
-FreeInit is not really free - the improved quality comes at the cost of extra computation. It requires sampling a few extra times depending on the `num_iters` parameter that is set when enabling it. Setting the `use_fast_sampling` parameter to `True` can improve the overall performance (at the cost of lower quality compared to when `use_fast_sampling=False` but still better results than vanilla video generation models).
-
-</Tip>
-
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!WARNING]
+> FreeInit is not really free - the improved quality comes at the cost of extra computation. It requires sampling a few extra times depending on the `num_iters` parameter that is set when enabling it. Setting the `use_fast_sampling` parameter to `True` can improve the overall performance (at the cost of lower quality compared to when `use_fast_sampling=False` but still better results than vanilla video generation models).
+
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

 <table>
 <tr>
@@ -23,11 +23,8 @@ The abstract from the paper is:
 
 You can find additional information about Attend-and-Excite on the [project page](https://attendandexcite.github.io/Attend-and-Excite/), the [original codebase](https://github.com/AttendAndExcite/Attend-and-Excite), or try it out in a [demo](https://huggingface.co/spaces/AttendAndExcite/Attend-and-Excite).
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 ## StableDiffusionAttendAndExcitePipeline
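Since this page documents `StableDiffusionAttendAndExcitePipeline`, a short usage sketch may help; the checkpoint, prompt, and token indices below are illustrative, following the pipeline's documented calling convention.

```py
import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

prompt = "a cat and a frog"
# token_indices selects the prompt tokens whose attention is boosted ("cat" and "frog" here).
image = pipe(
    prompt=prompt,
    token_indices=[2, 5],
    guidance_scale=7.5,
    num_inference_steps=50,
    max_iter_to_alter=25,
).images[0]
image.save("cat_and_frog.png")
```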
@@ -38,11 +38,8 @@ During inference:
 * The _quality_ of the predicted audio sample can be controlled by the `num_inference_steps` argument; higher steps give higher quality audio at the expense of slower inference.
 * The _length_ of the predicted audio sample can be controlled by varying the `audio_length_in_s` argument.
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 ## AudioLDMPipeline
 [[autodoc]] AudioLDMPipeline
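To make the two knobs described in the hunk above concrete, here is a minimal sketch; the checkpoint name is one of the published AudioLDM checkpoints, and the prompt is illustrative.

```py
import torch
from diffusers import AudioLDMPipeline
from scipy.io import wavfile

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm-s-full-v2", torch_dtype=torch.float16).to("cuda")

audio = pipe(
    "Techno music with a strong, upbeat tempo and high melodic riffs",
    num_inference_steps=10,  # quality vs. speed tradeoff
    audio_length_in_s=5.0,   # output duration
).audios[0]
wavfile.write("techno.wav", rate=16000, data=audio)
```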
@@ -58,11 +58,8 @@ See table below for details on the three checkpoints:
 
 The following example demonstrates how to construct good music and speech generation using the aforementioned tips: [example](https://huggingface.co/docs/diffusers/main/en/api/pipelines/audioldm2#diffusers.AudioLDM2Pipeline.__call__.example).
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 ## AudioLDM2Pipeline
 [[autodoc]] AudioLDM2Pipeline
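A compact sketch of the same pattern for AudioLDM 2; the checkpoint and negative prompt are illustrative, and the linked example above remains the authoritative reference.

```py
import torch
from diffusers import AudioLDM2Pipeline
from scipy.io import wavfile

pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2", torch_dtype=torch.float16).to("cuda")

audio = pipe(
    "The sound of a hammer hitting a wooden surface",
    negative_prompt="Low quality",  # negative prompts noticeably help AudioLDM 2
    num_inference_steps=200,
    audio_length_in_s=10.0,
).audios[0]
wavfile.write("hammer.wav", rate=16000, data=audio)
```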
@@ -16,11 +16,8 @@ AuraFlow is inspired by [Stable Diffusion 3](../pipelines/stable_diffusion/stabl
 
 It was developed by the Fal team and more details about it can be found in [this blog post](https://blog.fal.ai/auraflow/).
 
-<Tip>
-
-AuraFlow can be quite expensive to run on consumer hardware devices. However, you can perform a suite of optimizations to run it faster and in a more memory-friendly manner. Check out [this section](https://huggingface.co/blog/sd3#memory-optimizations-for-sd3) for more details.
-
-</Tip>
+> [!TIP]
+> AuraFlow can be quite expensive to run on consumer hardware devices. However, you can perform a suite of optimizations to run it faster and in a more memory-friendly manner. Check out [this section](https://huggingface.co/blog/sd3#memory-optimizations-for-sd3) for more details.
 
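A minimal sketch that pairs the tip above with one of the memory optimizations it mentions; the prompt and sampler settings are illustrative.

```py
import torch
from diffusers import AuraFlowPipeline

pipe = AuraFlowPipeline.from_pretrained("fal/AuraFlow", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()  # one of the memory optimizations the tip above refers to

image = pipe(
    prompt="A photograph of a red fox in an autumn forest",
    num_inference_steps=50,
    guidance_scale=3.5,
).images[0]
image.save("auraflow_fox.png")
```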
## Quantization
@@ -26,11 +26,8 @@ The original codebase can be found at [salesforce/LAVIS](https://github.com/sale
 
 `BlipDiffusionPipeline` and `BlipDiffusionControlNetPipeline` were contributed by [`ayushtues`](https://github.com/ayushtues/).
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 
 ## BlipDiffusionPipeline
docs/source/en/api/pipelines/bria_fibo.md (new file, 45 lines)
@@ -0,0 +1,45 @@
+<!--Copyright 2025 The HuggingFace Team. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+-->
+
+# Bria Fibo
+
+Text-to-image models have mastered imagination - but not control. FIBO changes that.
+
+FIBO is trained on structured JSON captions of up to 1,000+ words and is designed to understand and control different visual parameters such as lighting, composition, color, and camera settings, enabling precise and reproducible outputs.
+
+With only 8 billion parameters, FIBO provides a new level of image quality, prompt adherence and professional control.
+
+FIBO is trained exclusively on structured prompts and will not work with freeform text prompts.
+You can use the [FIBO-VLM-prompt-to-JSON](https://huggingface.co/briaai/FIBO-VLM-prompt-to-JSON) model or the [FIBO-gemini-prompt-to-JSON](https://huggingface.co/briaai/FIBO-gemini-prompt-to-JSON) model to convert your freeform text prompt into a structured JSON prompt.
+
+> [!NOTE]
+> Avoid using freeform text prompts directly with FIBO because it does not produce the best results.
+
+Refer to the Bria Fibo Hugging Face [page](https://huggingface.co/briaai/FIBO) to learn more.
+
+
+## Usage
+
+_As the model is gated, before using it with diffusers you first need to go to the [Bria Fibo Hugging Face page](https://huggingface.co/briaai/FIBO), fill in the form, and accept the gate. Once you are in, you need to log in so that your system knows you’ve accepted the gate._
+
+Use the command below to log in:
+
+```bash
+hf auth login
+```
+
+
+## BriaFiboPipeline
+
+[[autodoc]] BriaFiboPipeline
+- all
+- __call__
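A minimal usage sketch under stated assumptions: that `BriaFiboPipeline` follows the standard diffusers text-to-image calling convention and accepts the structured JSON prompt serialized as a string. The JSON fields shown are illustrative, not the model's documented schema; in practice, generate the prompt with one of the prompt-to-JSON models linked above.

```py
import json
import torch
from diffusers import BriaFiboPipeline

# Assumes you have accepted the gate and run `hf auth login` (see above).
pipe = BriaFiboPipeline.from_pretrained("briaai/FIBO", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

# Hypothetical structured prompt; the real schema comes from the
# FIBO prompt-to-JSON converter models, not from this sketch.
structured_prompt = {
    "subject": "a lighthouse on a rocky coast",
    "lighting": "golden hour, soft rim light",
    "camera": {"angle": "low", "lens": "35mm"},
}
image = pipe(prompt=json.dumps(structured_prompt)).images[0]
image.save("fibo.png")
```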
@@ -19,23 +19,21 @@ specific language governing permissions and limitations under the License.
 
 Chroma is a text to image generation model based on Flux.
 
-Original model checkpoints for Chroma can be found [here](https://huggingface.co/lodestones/Chroma).
+Original model checkpoints for Chroma can be found here:
+* High-resolution finetune: [lodestones/Chroma1-HD](https://huggingface.co/lodestones/Chroma1-HD)
+* Base model: [lodestones/Chroma1-Base](https://huggingface.co/lodestones/Chroma1-Base)
+* Original repo with progress checkpoints: [lodestones/Chroma](https://huggingface.co/lodestones/Chroma) (loading this repo with `from_pretrained` will load a Diffusers-compatible version of the `unlocked-v37` checkpoint)
 
-<Tip>
-
-Chroma can use all the same optimizations as Flux.
-
-</Tip>
+> [!TIP]
+> Chroma can use all the same optimizations as Flux.
 
 ## Inference
 
-The Diffusers version of Chroma is based on the [`unlocked-v37`](https://huggingface.co/lodestones/Chroma/blob/main/chroma-unlocked-v37.safetensors) version of the original model, which is available in the [Chroma repository](https://huggingface.co/lodestones/Chroma).
-
 ```python
 import torch
 from diffusers import ChromaPipeline
 
-pipe = ChromaPipeline.from_pretrained("lodestones/Chroma", torch_dtype=torch.bfloat16)
+pipe = ChromaPipeline.from_pretrained("lodestones/Chroma1-HD", torch_dtype=torch.bfloat16)
 pipe.enable_model_cpu_offload()
 
 prompt = [
@@ -66,10 +64,10 @@ Then run the following example
 import torch
 from diffusers import ChromaTransformer2DModel, ChromaPipeline
 
-model_id = "lodestones/Chroma"
+model_id = "lodestones/Chroma1-HD"
 dtype = torch.bfloat16
 
-transformer = ChromaTransformer2DModel.from_single_file("https://huggingface.co/lodestones/Chroma/blob/main/chroma-unlocked-v37.safetensors", torch_dtype=dtype)
+transformer = ChromaTransformer2DModel.from_single_file("https://huggingface.co/lodestones/Chroma1-HD/blob/main/Chroma1-HD.safetensors", torch_dtype=dtype)
 
 pipe = ChromaPipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=dtype)
 pipe.enable_model_cpu_offload()
docs/source/en/api/pipelines/chronoedit.md (new file, 211 lines)
@@ -0,0 +1,211 @@
+<!-- Copyright 2025 The ChronoEdit Team and HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. -->
+
+<div style="float: right;">
+  <div class="flex flex-wrap space-x-1">
+    <a href="https://huggingface.co/docs/diffusers/main/en/tutorials/using_peft_for_inference" target="_blank" rel="noopener">
+      <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
+    </a>
+  </div>
+</div>
+
+# ChronoEdit
+
+[ChronoEdit: Towards Temporal Reasoning for Image Editing and World Simulation](https://huggingface.co/papers/2510.04290) from NVIDIA and University of Toronto, by Jay Zhangjie Wu, Xuanchi Ren, Tianchang Shen, Tianshi Cao, Kai He, Yifan Lu, Ruiyuan Gao, Enze Xie, Shiyi Lan, Jose M. Alvarez, Jun Gao, Sanja Fidler, Zian Wang, Huan Ling.
+
+> **TL;DR:** ChronoEdit reframes image editing as a video generation task, using input and edited images as start/end frames to leverage pretrained video models with temporal consistency. A temporal reasoning stage introduces reasoning tokens to ensure physically plausible edits and visualize the editing trajectory.
+
+*Recent advances in large generative models have greatly enhanced both image editing and in-context image generation, yet a critical gap remains in ensuring physical consistency, where edited objects must remain coherent. This capability is especially vital for world simulation related tasks. In this paper, we present ChronoEdit, a framework that reframes image editing as a video generation problem. First, ChronoEdit treats the input and edited images as the first and last frames of a video, allowing it to leverage large pretrained video generative models that capture not only object appearance but also the implicit physics of motion and interaction through learned temporal consistency. Second, ChronoEdit introduces a temporal reasoning stage that explicitly performs editing at inference time. Under this setting, target frame is jointly denoised with reasoning tokens to imagine a plausible editing trajectory that constrains the solution space to physically viable transformations. The reasoning tokens are then dropped after a few steps to avoid the high computational cost of rendering a full video. To validate ChronoEdit, we introduce PBench-Edit, a new benchmark of image-prompt pairs for contexts that require physical consistency, and demonstrate that ChronoEdit surpasses state-of-the-art baselines in both visual fidelity and physical plausibility. Project page for code and models: [this https URL](https://research.nvidia.com/labs/toronto-ai/chronoedit).*
+
+The ChronoEdit pipeline is developed by the ChronoEdit Team. The original code is available on [GitHub](https://github.com/nv-tlabs/ChronoEdit), and pretrained models can be found in the [nvidia/ChronoEdit](https://huggingface.co/collections/nvidia/chronoedit) collection on Hugging Face.
+
+Available models/LoRAs:
+- [nvidia/ChronoEdit-14B-Diffusers](https://huggingface.co/nvidia/ChronoEdit-14B-Diffusers)
+- [nvidia/ChronoEdit-14B-Diffusers-Upscaler-Lora](https://huggingface.co/nvidia/ChronoEdit-14B-Diffusers-Upscaler-Lora)
+- [nvidia/ChronoEdit-14B-Diffusers-Paint-Brush-Lora](https://huggingface.co/nvidia/ChronoEdit-14B-Diffusers-Paint-Brush-Lora)
+
+### Image Editing
+
+```py
+import torch
+import numpy as np
+from diffusers import AutoencoderKLWan, ChronoEditTransformer3DModel, ChronoEditPipeline
+from diffusers.utils import export_to_video, load_image
+from transformers import CLIPVisionModel
+from PIL import Image
+
+model_id = "nvidia/ChronoEdit-14B-Diffusers"
+image_encoder = CLIPVisionModel.from_pretrained(model_id, subfolder="image_encoder", torch_dtype=torch.float32)
+vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
+transformer = ChronoEditTransformer3DModel.from_pretrained(model_id, subfolder="transformer", torch_dtype=torch.bfloat16)
+pipe = ChronoEditPipeline.from_pretrained(model_id, image_encoder=image_encoder, transformer=transformer, vae=vae, torch_dtype=torch.bfloat16)
+pipe.to("cuda")
+
+image = load_image(
+    "https://huggingface.co/spaces/nvidia/ChronoEdit/resolve/main/examples/3.png"
+)
+max_area = 720 * 1280
+aspect_ratio = image.height / image.width
+mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
+height = round(np.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
+width = round(np.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
+print("width", width, "height", height)
+image = image.resize((width, height))
+prompt = (
+    "The user wants to transform the image by adding a small, cute mouse sitting inside the floral teacup, enjoying a spa bath. The mouse should appear relaxed and cheerful, with a tiny white bath towel draped over its head like a turban. It should be positioned comfortably in the cup’s liquid, with gentle steam rising around it to blend with the cozy atmosphere. "
+    "The mouse’s pose should be natural—perhaps sitting upright with paws resting lightly on the rim or submerged in the tea. The teacup’s floral design, gold trim, and warm lighting must remain unchanged to preserve the original aesthetic. The steam should softly swirl around the mouse, enhancing the spa-like, whimsical mood."
+)
+
+output = pipe(
+    image=image,
+    prompt=prompt,
+    height=height,
+    width=width,
+    num_frames=5,
+    num_inference_steps=50,
+    guidance_scale=5.0,
+    enable_temporal_reasoning=False,
+    num_temporal_reasoning_steps=0,
+).frames[0]
+Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8")).save("output.png")
+```
+
+Optionally, enable **temporal reasoning** for improved physical consistency:
+```py
+output = pipe(
+    image=image,
+    prompt=prompt,
+    height=height,
+    width=width,
+    num_frames=29,
+    num_inference_steps=50,
+    guidance_scale=5.0,
+    enable_temporal_reasoning=True,
+    num_temporal_reasoning_steps=50,
+).frames[0]
+export_to_video(output, "output.mp4", fps=16)
+Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8")).save("output.png")
+```
+
+### Inference with the 8-Step Distillation LoRA
+
+```py
+import torch
+import numpy as np
+from diffusers import AutoencoderKLWan, ChronoEditTransformer3DModel, ChronoEditPipeline
+from diffusers.schedulers import UniPCMultistepScheduler
+from diffusers.utils import export_to_video, load_image
+from transformers import CLIPVisionModel
+from PIL import Image
+
+model_id = "nvidia/ChronoEdit-14B-Diffusers"
+image_encoder = CLIPVisionModel.from_pretrained(model_id, subfolder="image_encoder", torch_dtype=torch.float32)
+vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
+transformer = ChronoEditTransformer3DModel.from_pretrained(model_id, subfolder="transformer", torch_dtype=torch.bfloat16)
+pipe = ChronoEditPipeline.from_pretrained(model_id, image_encoder=image_encoder, transformer=transformer, vae=vae, torch_dtype=torch.bfloat16)
+pipe.load_lora_weights("nvidia/ChronoEdit-14B-Diffusers", weight_name="lora/chronoedit_distill_lora.safetensors", adapter_name="distill")
+pipe.fuse_lora(adapter_names=["distill"], lora_scale=1.0)
+pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=2.0)
+pipe.to("cuda")
+
+image = load_image(
+    "https://huggingface.co/spaces/nvidia/ChronoEdit/resolve/main/examples/3.png"
+)
+max_area = 720 * 1280
+aspect_ratio = image.height / image.width
+mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
+height = round(np.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
+width = round(np.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
+print("width", width, "height", height)
+image = image.resize((width, height))
+prompt = (
+    "The user wants to transform the image by adding a small, cute mouse sitting inside the floral teacup, enjoying a spa bath. The mouse should appear relaxed and cheerful, with a tiny white bath towel draped over its head like a turban. It should be positioned comfortably in the cup’s liquid, with gentle steam rising around it to blend with the cozy atmosphere. "
+    "The mouse’s pose should be natural—perhaps sitting upright with paws resting lightly on the rim or submerged in the tea. The teacup’s floral design, gold trim, and warm lighting must remain unchanged to preserve the original aesthetic. The steam should softly swirl around the mouse, enhancing the spa-like, whimsical mood."
+)
+
+output = pipe(
+    image=image,
+    prompt=prompt,
+    height=height,
+    width=width,
+    num_frames=5,
+    num_inference_steps=8,
+    guidance_scale=1.0,
+    enable_temporal_reasoning=False,
+    num_temporal_reasoning_steps=0,
+).frames[0]
+export_to_video(output, "output.mp4", fps=16)
+Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8")).save("output.png")
+```
+
+### Inference with Multiple LoRAs
+
+```py
+import torch
+import numpy as np
+from diffusers import AutoencoderKLWan, ChronoEditTransformer3DModel, ChronoEditPipeline
+from diffusers.schedulers import UniPCMultistepScheduler
+from diffusers.utils import export_to_video, load_image
+from transformers import CLIPVisionModel
+from PIL import Image
+
+model_id = "nvidia/ChronoEdit-14B-Diffusers"
+image_encoder = CLIPVisionModel.from_pretrained(model_id, subfolder="image_encoder", torch_dtype=torch.float32)
+vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
+transformer = ChronoEditTransformer3DModel.from_pretrained(model_id, subfolder="transformer", torch_dtype=torch.bfloat16)
+pipe = ChronoEditPipeline.from_pretrained(model_id, image_encoder=image_encoder, transformer=transformer, vae=vae, torch_dtype=torch.bfloat16)
+pipe.load_lora_weights("nvidia/ChronoEdit-14B-Diffusers-Paint-Brush-Lora", weight_name="paintbrush_lora_diffusers.safetensors", adapter_name="paintbrush")
+pipe.load_lora_weights("nvidia/ChronoEdit-14B-Diffusers", weight_name="lora/chronoedit_distill_lora.safetensors", adapter_name="distill")
+pipe.fuse_lora(adapter_names=["paintbrush", "distill"], lora_scale=1.0)
+pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=2.0)
+pipe.to("cuda")
+
+image = load_image(
+    "https://raw.githubusercontent.com/nv-tlabs/ChronoEdit/refs/heads/main/assets/images/input_paintbrush.png"
+)
+max_area = 720 * 1280
+aspect_ratio = image.height / image.width
+mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
+height = round(np.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
+width = round(np.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
+print("width", width, "height", height)
+image = image.resize((width, height))
+prompt = (
+    "Turn the pencil sketch in the image into an actual object that is consistent with the image’s content. The user wants to change the sketch to a crown and a hat."
+)
+
+output = pipe(
+    image=image,
+    prompt=prompt,
+    height=height,
+    width=width,
+    num_frames=5,
+    num_inference_steps=8,
+    guidance_scale=1.0,
+    enable_temporal_reasoning=False,
+    num_temporal_reasoning_steps=0,
+).frames[0]
+export_to_video(output, "output.mp4", fps=16)
+Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8")).save("output_1.png")
+```
+
+## ChronoEditPipeline
+
+[[autodoc]] ChronoEditPipeline
+- all
+- __call__
+
+## ChronoEditPipelineOutput
+
+[[autodoc]] pipelines.chronoedit.pipeline_output.ChronoEditPipelineOutput
@@ -50,7 +50,7 @@ from diffusers.utils import export_to_video
 pipeline_quant_config = PipelineQuantizationConfig(
     quant_backend="torchao",
     quant_kwargs={"quant_type": "int8wo"},
-    components_to_quantize=["transformer"]
+    components_to_quantize="transformer"
 )
 
 # fp8 layerwise weight-casting
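For context, the config in the hunk above plugs into `DiffusionPipeline.from_pretrained` roughly as follows; the model id is illustrative, and any pipeline with a `transformer` component works the same way.

```py
import torch
from diffusers import DiffusionPipeline
from diffusers.quantizers import PipelineQuantizationConfig

pipeline_quant_config = PipelineQuantizationConfig(
    quant_backend="torchao",
    quant_kwargs={"quant_type": "int8wo"},  # int8 weight-only quantization
    components_to_quantize="transformer",
)

# Illustrative model id; pass the config at load time to quantize the transformer.
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    quantization_config=pipeline_quant_config,
    torch_dtype=torch.bfloat16,
)
```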
@@ -21,11 +21,8 @@ The abstract from the paper is:
 
 *Recent advancements in text-to-image generative systems have been largely driven by diffusion models. However, single-stage text-to-image diffusion models still face challenges, in terms of computational efficiency and the refinement of image details. To tackle the issue, we propose CogView3, an innovative cascaded framework that enhances the performance of text-to-image diffusion. CogView3 is the first model implementing relay diffusion in the realm of text-to-image generation, executing the task by first creating low-resolution images and subsequently applying relay-based super-resolution. This methodology not only results in competitive text-to-image outputs but also greatly reduces both training and inference costs. Our experimental results demonstrate that CogView3 outperforms SDXL, the current state-of-the-art open-source text-to-image diffusion model, by 77.0% in human evaluations, all while requiring only about 1/2 of the inference time. The distilled variant of CogView3 achieves comparable performance while only utilizing 1/10 of the inference time by SDXL.*
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 This pipeline was contributed by [zRzRzRzRzRzRzR](https://github.com/zRzRzRzRzRzRzR). The original codebase can be found [here](https://huggingface.co/THUDM). The original weights can be found under [hf.co/THUDM](https://huggingface.co/THUDM).
@@ -15,11 +15,8 @@
 
 # CogView4
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 This pipeline was contributed by [zRzRzRzRzRzRzR](https://github.com/zRzRzRzRzRzRzR). The original codebase can be found [here](https://huggingface.co/THUDM). The original weights can be found under [hf.co/THUDM](https://huggingface.co/THUDM).
@@ -25,11 +25,8 @@ The abstract from the paper is:
 
 *Identity-preserving text-to-video (IPT2V) generation aims to create high-fidelity videos with consistent human identity. It is an important task in video generation but remains an open problem for generative models. This paper pushes the technical frontier of IPT2V in two directions that have not been resolved in the literature: (1) A tuning-free pipeline without tedious case-by-case finetuning, and (2) A frequency-aware heuristic identity-preserving Diffusion Transformer (DiT)-based control scheme. To achieve these goals, we propose **ConsisID**, a tuning-free DiT-based controllable IPT2V model to keep human-**id**entity **consis**tent in the generated video. Inspired by prior findings in frequency analysis of vision/diffusion transformers, it employs identity-control signals in the frequency domain, where facial features can be decomposed into low-frequency global features (e.g., profile, proportions) and high-frequency intrinsic features (e.g., identity markers that remain unaffected by pose changes). First, from a low-frequency perspective, we introduce a global facial extractor, which encodes the reference image and facial key points into a latent space, generating features enriched with low-frequency information. These features are then integrated into the shallow layers of the network to alleviate training challenges associated with DiT. Second, from a high-frequency perspective, we design a local facial extractor to capture high-frequency details and inject them into the transformer blocks, enhancing the model's ability to preserve fine-grained features. To leverage the frequency information for identity preservation, we propose a hierarchical training strategy, transforming a vanilla pre-trained video generation model into an IPT2V model. Extensive experiments demonstrate that our frequency-aware heuristic scheme provides an optimal control solution for DiT-based models. Thanks to this scheme, our **ConsisID** achieves excellent results in generating high-quality, identity-preserving videos, making strides towards more effective IPT2V. The model weight of ConsisID is publicly available at https://github.com/PKU-YuanGroup/ConsisID.*
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers.md) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading.md#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers.md) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading.md#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 This pipeline was contributed by [SHYuanBest](https://github.com/SHYuanBest). The original codebase can be found [here](https://github.com/PKU-YuanGroup/ConsisID). The original weights can be found under [hf.co/BestWishYsh](https://huggingface.co/BestWishYsh).
@@ -26,11 +26,8 @@ FLUX.1 Depth and Canny [dev] is a 12 billion parameter rectified flow transforme
 | Canny | [Black Forest Labs](https://huggingface.co/black-forest-labs) | [Link](https://huggingface.co/black-forest-labs/FLUX.1-Canny-dev) |
 
-<Tip>
-
-Flux can be quite expensive to run on consumer hardware devices. However, you can perform a suite of optimizations to run it faster and in a more memory-friendly manner. Check out [this section](https://huggingface.co/blog/sd3#memory-optimizations-for-sd3) for more details. Additionally, Flux can benefit from quantization for memory efficiency with a trade-off in inference latency. Refer to [this blog post](https://huggingface.co/blog/quanto-diffusers) to learn more. For an exhaustive list of resources, check out [this gist](https://gist.github.com/sayakpaul/b664605caf0aa3bf8585ab109dd5ac9c).
-
-</Tip>
+> [!TIP]
+> Flux can be quite expensive to run on consumer hardware devices. However, you can perform a suite of optimizations to run it faster and in a more memory-friendly manner. Check out [this section](https://huggingface.co/blog/sd3#memory-optimizations-for-sd3) for more details. Additionally, Flux can benefit from quantization for memory efficiency with a trade-off in inference latency. Refer to [this blog post](https://huggingface.co/blog/quanto-diffusers) to learn more. For an exhaustive list of resources, check out [this gist](https://gist.github.com/sayakpaul/b664605caf0aa3bf8585ab109dd5ac9c).
 
 ```python
 import torch
@@ -28,11 +28,8 @@ This model was contributed by [takuma104](https://huggingface.co/takuma104). ❤
 
 The original codebase can be found at [lllyasviel/ControlNet](https://github.com/lllyasviel/ControlNet), and you can find official ControlNet checkpoints on [lllyasviel's](https://huggingface.co/lllyasviel) Hub profile.
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 ## StableDiffusionControlNetPipeline
 [[autodoc]] StableDiffusionControlNetPipeline
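As a reminder of how the pipeline documented here is wired together, a minimal Canny-conditioned sketch follows; the checkpoint ids are the canonical pairing, but any compatible base model and ControlNet work the same way.

```py
import cv2
import numpy as np
import torch
from PIL import Image
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers.utils import load_image

# Turn a reference photo into a Canny edge map to use as the control image.
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")
edges = cv2.Canny(np.array(image), 100, 200)
canny_image = Image.fromarray(np.stack([edges] * 3, axis=-1))

# Pair a base Stable Diffusion checkpoint with a matching ControlNet.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

image = pipe("a futuristic portrait, highly detailed", image=canny_image, num_inference_steps=20).images[0]
image.save("controlnet_canny.png")
```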
@@ -44,11 +44,8 @@ XLabs ControlNets are also supported, which was contributed by the [XLabs team](
 | HED | [The XLabs Team](https://huggingface.co/XLabs-AI) | [Link](https://huggingface.co/XLabs-AI/flux-controlnet-hed-diffusers) |
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 ## FluxControlNetPipeline
 [[autodoc]] FluxControlNetPipeline
@@ -24,11 +24,8 @@ The abstract from the paper is:
 
 This code is implemented by Tencent Hunyuan Team. You can find pre-trained checkpoints for Hunyuan-DiT ControlNets on [Tencent Hunyuan](https://huggingface.co/Tencent-Hunyuan).
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 ## HunyuanDiTControlNetPipeline
 [[autodoc]] HunyuanDiTControlNetPipeline
@@ -38,11 +38,8 @@ This controlnet code is mainly implemented by [The InstantX Team](https://huggin
 | Inpainting | [The AlimamaCreative Team](https://huggingface.co/alimama-creative) | [link](https://huggingface.co/alimama-creative/SD3-Controlnet-Inpainting) |
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 ## StableDiffusion3ControlNetPipeline
 [[autodoc]] StableDiffusion3ControlNetPipeline
@@ -26,19 +26,13 @@ The abstract from the paper is:
 
 You can find additional smaller Stable Diffusion XL (SDXL) ControlNet checkpoints from the 🤗 [Diffusers](https://huggingface.co/diffusers) Hub organization, and browse [community-trained](https://huggingface.co/models?other=stable-diffusion-xl&other=controlnet) checkpoints on the Hub.
 
-<Tip warning={true}>
-
-🧪 Many of the SDXL ControlNet checkpoints are experimental, and there is a lot of room for improvement. Feel free to open an [Issue](https://github.com/huggingface/diffusers/issues/new/choose) and leave us feedback on how we can improve!
-
-</Tip>
+> [!WARNING]
+> 🧪 Many of the SDXL ControlNet checkpoints are experimental, and there is a lot of room for improvement. Feel free to open an [Issue](https://github.com/huggingface/diffusers/issues/new/choose) and leave us feedback on how we can improve!
 
 If you don't see a checkpoint you're interested in, you can train your own SDXL ControlNet with our [training script](../../../../../examples/controlnet/README_sdxl).
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 ## StableDiffusionXLControlNetPipeline
 [[autodoc]] StableDiffusionXLControlNetPipeline
@@ -31,11 +31,8 @@ Here's the overview from the [project page](https://vislearn.github.io/ControlNe
 
 This model was contributed by [UmerHA](https://twitter.com/UmerHAdil). ❤️
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 ## StableDiffusionControlNetXSPipeline
 [[autodoc]] StableDiffusionControlNetXSPipeline
@@ -27,17 +27,11 @@ Here's the overview from the [project page](https://vislearn.github.io/ControlNe
 
 This model was contributed by [UmerHA](https://twitter.com/UmerHAdil). ❤️
 
-<Tip warning={true}>
-
-🧪 Many of the SDXL ControlNet checkpoints are experimental, and there is a lot of room for improvement. Feel free to open an [Issue](https://github.com/huggingface/diffusers/issues/new/choose) and leave us feedback on how we can improve!
-
-</Tip>
+> [!WARNING]
+> 🧪 Many of the SDXL ControlNet checkpoints are experimental, and there is a lot of room for improvement. Feel free to open an [Issue](https://github.com/huggingface/diffusers/issues/new/choose) and leave us feedback on how we can improve!
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 ## StableDiffusionXLControlNetXSPipeline
 [[autodoc]] StableDiffusionXLControlNetXSPipeline
@@ -18,11 +18,8 @@
 
 *Physical AI needs to be trained digitally first. It needs a digital twin of itself, the policy model, and a digital twin of the world, the world model. In this paper, we present the Cosmos World Foundation Model Platform to help developers build customized world models for their Physical AI setups. We position a world foundation model as a general-purpose world model that can be fine-tuned into customized world models for downstream applications. Our platform covers a video curation pipeline, pre-trained world foundation models, examples of post-training of pre-trained world foundation models, and video tokenizers. To help Physical AI builders solve the most critical problems of our society, we make our platform open-source and our models open-weight with permissive licenses available via https://github.com/NVIDIA/Cosmos.*
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 ## Loading original format checkpoints
@@ -73,6 +70,12 @@ output.save("output.png")
 - all
 - __call__
 
+## Cosmos2_5_PredictBasePipeline
+
+[[autodoc]] Cosmos2_5_PredictBasePipeline
+- all
+- __call__
+
 ## CosmosPipelineOutput
 
 [[autodoc]] pipelines.cosmos.pipeline_output.CosmosPipelineOutput
@@ -20,11 +20,8 @@ specific language governing permissions and limitations under the License.
 Dance Diffusion is the first in a suite of generative audio tools for producers and musicians released by [Harmonai](https://github.com/Harmonai-org).
 
 
-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
 
 ## DanceDiffusionPipeline
 [[autodoc]] DanceDiffusionPipeline
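A minimal unconditional audio sketch for this pipeline; the Harmonai checkpoint name is one of the published Dance Diffusion models, and the output handling (channel-last transpose for WAV writing) is an assumption about the `audios` array layout.

```py
import torch
from scipy.io import wavfile
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16).to("cuda")

# Unconditional generation: only the clip length and step count to choose.
audio = pipe(audio_length_in_s=4.5, num_inference_steps=100).audios[0]
wavfile.write("maestro.wav", rate=pipe.unet.config.sample_rate, data=audio.T.astype("float32"))
```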
@@ -20,11 +20,8 @@ The abstract from the paper is:
|
|||||||
|
|
||||||
The original codebase can be found at [hohonathanho/diffusion](https://github.com/hojonathanho/diffusion).
|
The original codebase can be found at [hohonathanho/diffusion](https://github.com/hojonathanho/diffusion).
|
||||||
|
|
||||||
<Tip>
|
> [!TIP]
|
||||||
|
> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
|
||||||
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
|
|
||||||
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
# DDPMPipeline
|
# DDPMPipeline
|
||||||
[[autodoc]] DDPMPipeline
|
[[autodoc]] DDPMPipeline
|
||||||
|
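A minimal usage sketch for the pipeline this page documents, assuming the `google/ddpm-cat-256` checkpoint; DDPM runs the full reverse chain, so sampling is slow by design:

```py
from diffusers import DDPMPipeline

pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256").to("cuda")

# Unconditional sampling over the full 1000-step reverse process.
image = pipe(num_inference_steps=1000).images[0]
image.save("ddpm_cat.png")
```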
@@ -21,7 +21,7 @@ The abstract from the paper is:

 *Image generation has recently seen tremendous advances, with diffusion models allowing to synthesize convincing images for a large variety of text prompts. In this article, we propose DiffEdit, a method to take advantage of text-conditioned diffusion models for the task of semantic image editing, where the goal is to edit an image based on a text query. Semantic image editing is an extension of image generation, with the additional constraint that the generated image should be as similar as possible to a given input image. Current editing methods based on diffusion models usually require to provide a mask, making the task much easier by treating it as a conditional inpainting task. In contrast, our main contribution is able to automatically generate a mask highlighting regions of the input image that need to be edited, by contrasting predictions of a diffusion model conditioned on different text prompts. Moreover, we rely on latent inference to preserve content in those regions of interest and show excellent synergies with mask-based diffusion. DiffEdit achieves state-of-the-art editing performance on ImageNet. In addition, we evaluate semantic image editing in more challenging settings, using images from the COCO dataset as well as text-based generated images.*

-The original codebase can be found at [Xiang-cd/DiffEdit-stable-diffusion](https://github.com/Xiang-cd/DiffEdit-stable-diffusion), and you can try it out in this [demo](https://blog.problemsolversguild.com/technical/research/2022/11/02/DiffEdit-Implementation.html).
+The original codebase can be found at [Xiang-cd/DiffEdit-stable-diffusion](https://github.com/Xiang-cd/DiffEdit-stable-diffusion), and you can try it out in this [demo](https://blog.problemsolversguild.com/posts/2022-11-02-diffedit-implementation.html).

 This pipeline was contributed by [clarencechen](https://github.com/clarencechen). ❤️
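The abstract describes a three-stage procedure — contrastive mask generation, latent inversion, and masked denoising — which maps directly onto the pipeline's three entry points. A condensed sketch, assuming the `stabilityai/stable-diffusion-2-1` checkpoint and an input image of your own:

```py
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)

image = load_image("fruit_bowl.png").resize((768, 768))  # any RGB image
source_prompt = "a bowl of fruits"
target_prompt = "a basket of pears"

# 1. Contrast predictions under the two prompts to derive an edit mask.
mask = pipe.generate_mask(image=image, source_prompt=source_prompt, target_prompt=target_prompt)
# 2. Invert the image into latents so unedited regions are preserved.
inv_latents = pipe.invert(prompt=source_prompt, image=image).latents
# 3. Denoise toward the target prompt inside the masked region.
edited = pipe(
    prompt=target_prompt,
    mask_image=mask,
    image_latents=inv_latents,
    negative_prompt=source_prompt,
).images[0]
```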
@@ -20,11 +20,8 @@ The abstract from the paper is:

 The original codebase can be found at [facebookresearch/dit](https://github.com/facebookresearch/dit).

-<Tip>
-
-Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
+> [!TIP]
+> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

 ## DiTPipeline

 [[autodoc]] DiTPipeline
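DiT is class-conditional on ImageNet, so generation starts from label ids rather than a text prompt. A minimal sketch, assuming the `facebook/DiT-XL-2-256` checkpoint:

```py
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

# Map human-readable ImageNet class names to label ids.
class_ids = pipe.get_label_ids(["golden retriever"])
image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
image.save("dit_sample.png")
```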
Some files were not shown because too many files have changed in this diff.