mirror of https://github.com/huggingface/diffusers.git
synced 2025-12-10 22:44:38 +08:00
Compare commits
97 Commits
custom-gc-...poc_branch
| SHA1 |
|---|
| 16b10bf88f |
| 37a5f1b3b6 |
| 501d9de701 |
| e5c43b8af7 |
| 9a8e8db79f |
| 764d7ed49a |
| 3fab6624fd |
| f0ac7aaafc |
| 613e77f8be |
| 1450c2ac4f |
| cc7b5b873a |
| 0404703237 |
| 13f20c7fe8 |
| 87599691b9 |
| 36517f6124 |
| 64af74fc58 |
| 170833c22a |
| db21c97043 |
| 3fdf173084 |
| aba4a5799a |
| b0550a66cc |
| 6f74ef550d |
| 9c7e205176 |
| 64dec70e56 |
| ffb6777ace |
| 85fcbaf314 |
| d75ea3c772 |
| b27d4edbe1 |
| 2b2d04299c |
| 6cef7d2366 |
| 9055ccb382 |
| 1871a69ecb |
| e3bc4aab2e |
| f0707751ef |
| d9ee3879b0 |
| 454f82e6fc |
| 1f853504da |
| 51941387dc |
| c7a8c4395a |
| a4c1aac3ae |
| b2ca39c8ac |
| 532171266b |
| f550745a2b |
| f10d3c6d04 |
| 0fb7068364 |
| f8b54cf037 |
| 680a8ed855 |
| f5929e0306 |
| 6fe05b9b93 |
| 2bc82d6381 |
| 924f880d4d |
| b75b204a58 |
| c14057c8db |
| 3579cd2bb7 |
| 3e99b5677e |
| 952b9131a2 |
| d90cd3621d |
| 69f919d8b5 |
| a6b843a797 |
| 27b90235e4 |
| 9a147b82f7 |
| ab428207a7 |
| 8d081de844 |
| a0c22997fd |
| 97abdd2210 |
| 051ebc3c8d |
| 5105b5a83d |
| ca6330dc53 |
| 28f48f4051 |
| 067eab1b3a |
| 57ac673802 |
| 81440fd474 |
| c470274865 |
| 798e17187d |
| ed4b75229f |
| 8ae8008b0d |
| c80eda9d3e |
| 7fb481f840 |
| 9f5ad1db41 |
| 464374fb87 |
| d43ce14e2d |
| cd0a4a82cf |
| 145522cbb7 |
| 23bc56a02d |
| 5b1dcd1584 |
| dbe0094e86 |
| f63d32233f |
| 5e8e6cb44f |
| 3e35f56b00 |
| 537891e693 |
| 9f28f1abba |
| 5d2d23986e |
| 1ae9b0595f |
| aad69ac2f3 |
| ea76880bd7 |
| 33f936154d |
| e6037e8275 |
38 .github/ISSUE_TEMPLATE/remote-vae-pilot-feedback.yml vendored Normal file
@@ -0,0 +1,38 @@

```yaml
name: "\U0001F31F Remote VAE"
description: Feedback for remote VAE pilot
labels: [ "Remote VAE" ]

body:
  - type: textarea
    id: positive
    validations:
      required: true
    attributes:
      label: Did you like the remote VAE solution?
      description: |
        If you liked it, we would appreciate it if you could elaborate what you liked.

  - type: textarea
    id: feedback
    validations:
      required: true
    attributes:
      label: What can be improved about the current solution?
      description: |
        Let us know the things you would like to see improved. Note that we will work optimizing the solution once the pilot is over and we have usage.

  - type: textarea
    id: others
    validations:
      required: true
    attributes:
      label: What other VAEs you would like to see if the pilot goes well?
      description: |
        Provide a list of the VAEs you would like to see in the future if the pilot goes well.

  - type: textarea
    id: additional-info
    attributes:
      label: Notify the members of the team
      description: |
        Tag the following folks when submitting this feedback: @hlky @sayakpaul
```
127 .github/workflows/pr_style_bot.yml vendored Normal file
@@ -0,0 +1,127 @@

```yaml
name: PR Style Bot

on:
  issue_comment:
    types: [created]

permissions:
  contents: write
  pull-requests: write

jobs:
  run-style-bot:
    if: >
      contains(github.event.comment.body, '@bot /style') &&
      github.event.issue.pull_request != null
    runs-on: ubuntu-latest

    steps:
      - name: Extract PR details
        id: pr_info
        uses: actions/github-script@v6
        with:
          script: |
            const prNumber = context.payload.issue.number;
            const { data: pr } = await github.rest.pulls.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              pull_number: prNumber
            });

            // We capture both the branch ref and the "full_name" of the head repo
            // so that we can check out the correct repository & branch (including forks).
            core.setOutput("prNumber", prNumber);
            core.setOutput("headRef", pr.head.ref);
            core.setOutput("headRepoFullName", pr.head.repo.full_name);

      - name: Check out PR branch
        uses: actions/checkout@v3
        env:
          HEADREPOFULLNAME: ${{ steps.pr_info.outputs.headRepoFullName }}
          HEADREF: ${{ steps.pr_info.outputs.headRef }}
        with:
          # Instead of checking out the base repo, use the contributor's repo name
          repository: ${{ env.HEADREPOFULLNAME }}
          ref: ${{ env.HEADREF }}
          # You may need fetch-depth: 0 for being able to push
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Debug
        env:
          HEADREPOFULLNAME: ${{ steps.pr_info.outputs.headRepoFullName }}
          HEADREF: ${{ steps.pr_info.outputs.headRef }}
          PRNUMBER: ${{ steps.pr_info.outputs.prNumber }}
        run: |
          echo "PR number: $PRNUMBER"
          echo "Head Ref: $HEADREF"
          echo "Head Repo Full Name: $HEADREPOFULLNAME"

      - name: Set up Python
        uses: actions/setup-python@v4

      - name: Install dependencies
        run: |
          pip install .[quality]

      - name: Download Makefile from main branch
        run: |
          curl -o main_Makefile https://raw.githubusercontent.com/huggingface/diffusers/main/Makefile

      - name: Compare Makefiles
        run: |
          if ! diff -q main_Makefile Makefile; then
            echo "Error: The Makefile has changed. Please ensure it matches the main branch."
            exit 1
          fi
          echo "No changes in Makefile. Proceeding..."
          rm -rf main_Makefile

      - name: Run make style and make quality
        run: |
          make style && make quality

      - name: Commit and push changes
        id: commit_and_push
        env:
          HEADREPOFULLNAME: ${{ steps.pr_info.outputs.headRepoFullName }}
          HEADREF: ${{ steps.pr_info.outputs.headRef }}
          PRNUMBER: ${{ steps.pr_info.outputs.prNumber }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          echo "HEADREPOFULLNAME: $HEADREPOFULLNAME, HEADREF: $HEADREF"
          # Configure git with the Actions bot user
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

          # Make sure your 'origin' remote is set to the contributor's fork
          git remote set-url origin "https://x-access-token:${GITHUB_TOKEN}@github.com/$HEADREPOFULLNAME.git"

          # If there are changes after running style/quality, commit them
          if [ -n "$(git status --porcelain)" ]; then
            git add .
            git commit -m "Apply style fixes"
            # Push to the original contributor's forked branch
            git push origin HEAD:$HEADREF
            echo "changes_pushed=true" >> $GITHUB_OUTPUT
          else
            echo "No changes to commit."
            echo "changes_pushed=false" >> $GITHUB_OUTPUT
          fi

      - name: Comment on PR with workflow run link
        if: steps.commit_and_push.outputs.changes_pushed == 'true'
        uses: actions/github-script@v6
        with:
          script: |
            const prNumber = parseInt(process.env.prNumber, 10);
            const runUrl = `${process.env.GITHUB_SERVER_URL}/${process.env.GITHUB_REPOSITORY}/actions/runs/${process.env.GITHUB_RUN_ID}`

            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber,
              body: `Style fixes have been applied. [View the workflow run here](${runUrl}).`
            });
        env:
          prNumber: ${{ steps.pr_info.outputs.prNumber }}
```
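For context on how this bot is triggered in practice: a maintainer comments `@bot /style` on the pull request, which fires the `issue_comment` event the workflow listens for. The snippet below is only an illustrative sketch of posting that trigger comment programmatically with PyGithub; the token environment variable and the PR number are placeholders, and commenting by hand works just as well.

```python
# Illustrative only: post the "@bot /style" trigger comment on a PR with PyGithub.
# GITHUB_TOKEN and the PR number are placeholders, not values taken from this diff.
import os

from github import Github  # pip install PyGithub

gh = Github(os.environ["GITHUB_TOKEN"])
repo = gh.get_repo("huggingface/diffusers")

# Pull requests share the issue comment API, which is exactly what the
# `issue_comment` trigger in the workflow above reacts to.
pr_number = 1234  # placeholder PR number
issue = repo.get_issue(number=pr_number)
issue.create_comment("@bot /style")
```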
8 .github/workflows/pr_tests.yml vendored

```
@@ -2,8 +2,8 @@ name: Fast tests for PRs

on:
  pull_request:
    branches:
      - main
    branches: [main]
    types: [synchronize]
    paths:
      - "src/diffusers/**.py"
      - "benchmarks/**.py"
@@ -64,6 +64,7 @@ jobs:
      run: |
        python utils/check_copies.py
        python utils/check_dummies.py
        python utils/check_support_list.py
        make deps_table_check_updated
    - name: Check if failure
      if: ${{ failure() }}
@@ -120,7 +121,8 @@ jobs:
      run: |
        python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
        python -m uv pip install -e [quality,test]
        python -m uv pip install accelerate
        pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
        pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps

    - name: Environment
      run: |
```
250 .github/workflows/pr_tests_gpu.yml vendored Normal file
@@ -0,0 +1,250 @@

```yaml
name: Fast GPU Tests on PR

on:
  pull_request:
    branches: main
    paths:
      - "src/diffusers/models/modeling_utils.py"
      - "src/diffusers/models/model_loading_utils.py"
      - "src/diffusers/pipelines/pipeline_utils.py"
      - "src/diffusers/pipeline_loading_utils.py"
      - "src/diffusers/loaders/lora_base.py"
      - "src/diffusers/loaders/lora_pipeline.py"
      - "src/diffusers/loaders/peft.py"
      - "tests/pipelines/test_pipelines_common.py"
      - "tests/models/test_modeling_common.py"
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

env:
  DIFFUSERS_IS_CI: yes
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  HF_HUB_ENABLE_HF_TRANSFER: 1
  PYTEST_TIMEOUT: 600
  PIPELINE_USAGE_CUTOFF: 1000000000 # set high cutoff so that only always-test pipelines run

jobs:
  setup_torch_cuda_pipeline_matrix:
    name: Setup Torch Pipelines CUDA Slow Tests Matrix
    runs-on:
      group: aws-general-8-plus
    container:
      image: diffusers/diffusers-pytorch-cpu
    outputs:
      pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
      - name: Environment
        run: |
          python utils/print_env.py
      - name: Fetch Pipeline Matrix
        id: fetch_pipeline_matrix
        run: |
          matrix=$(python utils/fetch_torch_cuda_pipeline_test_matrix.py)
          echo $matrix
          echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
      - name: Pipeline Tests Artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: test-pipelines.json
          path: reports

  torch_pipelines_cuda_tests:
    name: Torch Pipelines CUDA Tests
    needs: setup_torch_cuda_pipeline_matrix
    strategy:
      fail-fast: false
      max-parallel: 8
      matrix:
        module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: NVIDIA-SMI
        run: |
          nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps

      - name: Environment
        run: |
          python utils/print_env.py
      - name: Extract tests
        id: extract_tests
        run: |
          pattern=$(python utils/extract_tests_from_mixin.py --type pipeline)
          echo "$pattern" > /tmp/test_pattern.txt
          echo "pattern_file=/tmp/test_pattern.txt" >> $GITHUB_OUTPUT

      - name: PyTorch CUDA checkpoint tests on Ubuntu
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          if [ "${{ matrix.module }}" = "ip_adapters" ]; then
            python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
              -s -v -k "not Flax and not Onnx" \
              --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
              tests/pipelines/${{ matrix.module }}
          else
            pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
            python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
              -s -v -k "not Flax and not Onnx and $pattern" \
              --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
              tests/pipelines/${{ matrix.module }}
          fi

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pipeline_${{ matrix.module }}_test_reports
          path: reports

  torch_cuda_tests:
    name: Torch CUDA Tests
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    defaults:
      run:
        shell: bash
    strategy:
      fail-fast: false
      max-parallel: 2
      matrix:
        module: [models, schedulers, lora, others]
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps

      - name: Environment
        run: |
          python utils/print_env.py

      - name: Extract tests
        id: extract_tests
        run: |
          pattern=$(python utils/extract_tests_from_mixin.py --type ${{ matrix.module }})
          echo "$pattern" > /tmp/test_pattern.txt
          echo "pattern_file=/tmp/test_pattern.txt" >> $GITHUB_OUTPUT

      - name: Run PyTorch CUDA tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
          if [ -z "$pattern" ]; then
            python -m pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx" tests/${{ matrix.module }} \
              --make-reports=tests_torch_cuda_${{ matrix.module }}
          else
            python -m pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx and $pattern" tests/${{ matrix.module }} \
              --make-reports=tests_torch_cuda_${{ matrix.module }}
          fi

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_torch_cuda_${{ matrix.module }}_stats.txt
          cat reports/tests_torch_cuda_${{ matrix.module }}_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: torch_cuda_test_reports_${{ matrix.module }}
          path: reports

  run_examples_tests:
    name: Examples PyTorch CUDA tests on Ubuntu
    pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
    runs-on:
      group: aws-g4dn-2xlarge

    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --gpus 0 --shm-size "16gb" --ipc host
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: NVIDIA-SMI
        run: |
          nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test,training]

      - name: Environment
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run example tests on GPU
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install timm
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/examples_torch_cuda_stats.txt
          cat reports/examples_torch_cuda_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: examples_test_reports
          path: reports
```
2 .github/workflows/push_tests.yml vendored

```
@@ -349,7 +349,6 @@ jobs:
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --gpus 0 --shm-size "16gb" --ipc host

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
@@ -359,7 +358,6 @@ jobs:
      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
```
14 .github/workflows/run_tests_from_a_pr.yml vendored

```
@@ -7,8 +7,8 @@ on:
        default: 'diffusers/diffusers-pytorch-cuda'
        description: 'Name of the Docker image'
        required: true
      branch:
        description: 'PR Branch to test on'
      pr_number:
        description: 'PR number to test on'
        required: true
      test:
        description: 'Tests to run (e.g.: `tests/models`).'
@@ -43,8 +43,8 @@ jobs:
            exit 1
          fi

          if [[ ! "$PY_TEST" =~ ^tests/(models|pipelines) ]]; then
            echo "Error: The input string must contain either 'models' or 'pipelines' after 'tests/'."
          if [[ ! "$PY_TEST" =~ ^tests/(models|pipelines|lora) ]]; then
            echo "Error: The input string must contain either 'models', 'pipelines', or 'lora' after 'tests/'."
            exit 1
          fi

@@ -53,13 +53,13 @@ jobs:
            exit 1
          fi
          echo "$PY_TEST"

        shell: bash -e {0}

      - name: Checkout PR branch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.branch }}
          repository: ${{ github.event.pull_request.head.repo.full_name }}

          ref: refs/pull/${{ inputs.pr_number }}/head

      - name: Install pytest
        run: |
```
3 .github/workflows/trufflehog.yml vendored

```
@@ -13,3 +13,6 @@ jobs:
          fetch-depth: 0
      - name: Secret Scanning
        uses: trufflesecurity/trufflehog@main
        with:
          extra_args: --results=verified,unknown
```
@@ -1,3 +1,4 @@

<!---
Copyright 2022 - The HuggingFace Team. All rights reserved.
```yaml
@@ -89,6 +89,8 @@
    title: Kandinsky
  - local: using-diffusers/ip_adapter
    title: IP-Adapter
  - local: using-diffusers/omnigen
    title: OmniGen
  - local: using-diffusers/pag
    title: PAG
  - local: using-diffusers/controlnet
@@ -276,6 +278,8 @@
    title: ConsisIDTransformer3DModel
  - local: api/models/cogview3plus_transformer2d
    title: CogView3PlusTransformer2DModel
  - local: api/models/cogview4_transformer2d
    title: CogView4Transformer2DModel
  - local: api/models/dit_transformer2d
    title: DiTTransformer2DModel
  - local: api/models/flux_transformer
@@ -288,10 +292,14 @@
    title: LatteTransformer3DModel
  - local: api/models/lumina_nextdit2d
    title: LuminaNextDiT2DModel
  - local: api/models/lumina2_transformer2d
    title: Lumina2Transformer2DModel
  - local: api/models/ltx_video_transformer3d
    title: LTXVideoTransformer3DModel
  - local: api/models/mochi_transformer3d
    title: MochiTransformer3DModel
  - local: api/models/omnigen_transformer
    title: OmniGenTransformer2DModel
  - local: api/models/pixart_transformer2d
    title: PixArtTransformer2DModel
  - local: api/models/prior_transformer
@@ -376,6 +384,8 @@
    title: CogVideoX
  - local: api/pipelines/cogview3
    title: CogView3
  - local: api/pipelines/cogview4
    title: CogView4
  - local: api/pipelines/consisid
    title: ConsisID
  - local: api/pipelines/consistency_models
@@ -438,6 +448,8 @@
    title: LEDITS++
  - local: api/pipelines/ltx_video
    title: LTXVideo
  - local: api/pipelines/lumina2
    title: Lumina 2.0
  - local: api/pipelines/lumina
    title: Lumina-T2X
  - local: api/pipelines/marigold
@@ -448,6 +460,8 @@
    title: MultiDiffusion
  - local: api/pipelines/musicldm
    title: MusicLDM
  - local: api/pipelines/omnigen
    title: OmniGen
  - local: api/pipelines/pag
    title: PAG
  - local: api/pipelines/paint_by_example
@@ -529,6 +543,10 @@
    title: Overview
  - local: api/schedulers/cm_stochastic_iterative
    title: CMStochasticIterativeScheduler
  - local: api/schedulers/ddim_cogvideox
    title: CogVideoXDDIMScheduler
  - local: api/schedulers/multistep_dpm_solver_cogvideox
    title: CogVideoXDPMScheduler
  - local: api/schedulers/consistency_decoder
    title: ConsistencyDecoderScheduler
  - local: api/schedulers/cosine_dpm
```
@@ -25,3 +25,16 @@ Customized activation functions for supporting various models in 🤗 Diffusers.

## ApproximateGELU

[[autodoc]] models.activations.ApproximateGELU


## SwiGLU

[[autodoc]] models.activations.SwiGLU

## FP32SiLU

[[autodoc]] models.activations.FP32SiLU

## LinearActivation

[[autodoc]] models.activations.LinearActivation
@@ -147,3 +147,20 @@ An attention processor is a class for applying different types of attention mechanisms.

## XLAFlashAttnProcessor2_0

[[autodoc]] models.attention_processor.XLAFlashAttnProcessor2_0

## XFormersJointAttnProcessor

[[autodoc]] models.attention_processor.XFormersJointAttnProcessor

## IPAdapterXFormersAttnProcessor

[[autodoc]] models.attention_processor.IPAdapterXFormersAttnProcessor

## FluxIPAdapterJointAttnProcessor2_0

[[autodoc]] models.attention_processor.FluxIPAdapterJointAttnProcessor2_0


## XLAFluxFlashAttnProcessor2_0

[[autodoc]] models.attention_processor.XLAFluxFlashAttnProcessor2_0
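Since the hunk above only lists the new processor classes, here is a small, illustrative sketch of the pattern they all share: an attention processor is swapped onto a diffusers model via `set_attn_processor`. The checkpoint id is an assumption, and the generic `AttnProcessor2_0` is used because the XLA/xFormers variants documented above need `torch_xla` or `xformers` installed.

```python
# Illustrative sketch: swapping the attention processor on a diffusers model.
# The checkpoint id is an assumption; the processors documented above
# (e.g. XLAFlashAttnProcessor2_0, IPAdapterXFormersAttnProcessor) follow the
# same pattern but require their respective backends.
import torch
from diffusers import UNet2DConditionModel
from diffusers.models.attention_processor import AttnProcessor2_0

unet = UNet2DConditionModel.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16
)

# Replace every attention module's processor with the PyTorch 2.0 SDPA implementation.
unet.set_attn_processor(AttnProcessor2_0())

# Inspect what is currently set on the attention layers.
print(list(unet.attn_processors.values())[0])
```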
@@ -20,6 +20,10 @@ LoRA is a fast and lightweight training method that inserts and trains a signifi
- [`FluxLoraLoaderMixin`] provides similar functions for [Flux](https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux).
- [`CogVideoXLoraLoaderMixin`] provides similar functions for [CogVideoX](https://huggingface.co/docs/diffusers/main/en/api/pipelines/cogvideox).
- [`Mochi1LoraLoaderMixin`] provides similar functions for [Mochi](https://huggingface.co/docs/diffusers/main/en/api/pipelines/mochi).
- [`LTXVideoLoraLoaderMixin`] provides similar functions for [LTX-Video](https://huggingface.co/docs/diffusers/main/en/api/pipelines/ltx_video).
- [`SanaLoraLoaderMixin`] provides similar functions for [Sana](https://huggingface.co/docs/diffusers/main/en/api/pipelines/sana).
- [`HunyuanVideoLoraLoaderMixin`] provides similar functions for [HunyuanVideo](https://huggingface.co/docs/diffusers/main/en/api/pipelines/hunyuan_video).
- [`Lumina2LoraLoaderMixin`] provides similar functions for [Lumina2](https://huggingface.co/docs/diffusers/main/en/api/pipelines/lumina2).
- [`AmusedLoraLoaderMixin`] is for the [`AmusedPipeline`].
- [`LoraBaseMixin`] provides a base class with several utility methods to fuse, unfuse, unload LoRAs, and more.

@@ -53,6 +57,22 @@ To learn more about how to load LoRA weights, see the [LoRA](../../using-diffuse

[[autodoc]] loaders.lora_pipeline.Mochi1LoraLoaderMixin

## LTXVideoLoraLoaderMixin

[[autodoc]] loaders.lora_pipeline.LTXVideoLoraLoaderMixin

## SanaLoraLoaderMixin

[[autodoc]] loaders.lora_pipeline.SanaLoraLoaderMixin

## HunyuanVideoLoraLoaderMixin

[[autodoc]] loaders.lora_pipeline.HunyuanVideoLoraLoaderMixin

## Lumina2LoraLoaderMixin

[[autodoc]] loaders.lora_pipeline.Lumina2LoraLoaderMixin

## AmusedLoraLoaderMixin

[[autodoc]] loaders.lora_pipeline.AmusedLoraLoaderMixin
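All of the mixins listed above expose the same high-level entry points (`load_lora_weights`, `fuse_lora`, `unload_lora_weights`), so the usage pattern is the same across pipelines. As a rough sketch (the LoRA repository id below is a placeholder, not something referenced by this diff; the base checkpoint id comes from the Flux example later on this page):

```python
# Rough usage sketch for the LoRA loader mixins listed above.
# The LoRA repo id is a placeholder; the base checkpoint follows the Flux docs.
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)

# FluxPipeline inherits from FluxLoraLoaderMixin, so the mixin methods are available directly.
pipe.load_lora_weights("some-user/some-flux-lora", adapter_name="style")  # placeholder repo id

# Optionally merge the LoRA into the base weights for slightly faster inference ...
pipe.fuse_lora()

# ... or undo the merge and drop the adapter when it is no longer needed.
pipe.unfuse_lora()
pipe.unload_lora_weights()
```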
30 docs/source/en/api/models/cogview4_transformer2d.md Normal file
@@ -0,0 +1,30 @@

<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# CogView4Transformer2DModel

A Diffusion Transformer model for 2D data from [CogView4]().

The model can be loaded with the following code snippet.

```python
import torch

from diffusers import CogView4Transformer2DModel

transformer = CogView4Transformer2DModel.from_pretrained("THUDM/CogView4-6B", subfolder="transformer", torch_dtype=torch.bfloat16).to("cuda")
```

## CogView4Transformer2DModel

[[autodoc]] CogView4Transformer2DModel

## Transformer2DModelOutput

[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
30 docs/source/en/api/models/lumina2_transformer2d.md Normal file
@@ -0,0 +1,30 @@

<!-- Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# Lumina2Transformer2DModel

A Diffusion Transformer model for 2D image data, introduced in [Lumina Image 2.0](https://huggingface.co/Alpha-VLLM/Lumina-Image-2.0) by Alpha-VLLM.

The model can be loaded with the following code snippet.

```python
import torch

from diffusers import Lumina2Transformer2DModel

transformer = Lumina2Transformer2DModel.from_pretrained("Alpha-VLLM/Lumina-Image-2.0", subfolder="transformer", torch_dtype=torch.bfloat16)
```

## Lumina2Transformer2DModel

[[autodoc]] Lumina2Transformer2DModel

## Transformer2DModelOutput

[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
30 docs/source/en/api/models/omnigen_transformer.md Normal file
@@ -0,0 +1,30 @@

<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# OmniGenTransformer2DModel

A Transformer model that accepts multimodal instructions to generate images for [OmniGen](https://github.com/VectorSpaceLab/OmniGen/).

The abstract from the paper is:

*The emergence of Large Language Models (LLMs) has unified language generation tasks and revolutionized human-machine interaction. However, in the realm of image generation, a unified model capable of handling various tasks within a single framework remains largely unexplored. In this work, we introduce OmniGen, a new diffusion model for unified image generation. OmniGen is characterized by the following features: 1) Unification: OmniGen not only demonstrates text-to-image generation capabilities but also inherently supports various downstream tasks, such as image editing, subject-driven generation, and visual conditional generation. 2) Simplicity: The architecture of OmniGen is highly simplified, eliminating the need for additional plugins. Moreover, compared to existing diffusion models, it is more user-friendly and can complete complex tasks end-to-end through instructions without the need for extra intermediate steps, greatly simplifying the image generation workflow. 3) Knowledge Transfer: Benefit from learning in a unified format, OmniGen effectively transfers knowledge across different tasks, manages unseen tasks and domains, and exhibits novel capabilities. We also explore the model’s reasoning capabilities and potential applications of the chain-of-thought mechanism. This work represents the first attempt at a general-purpose image generation model, and we will release our resources at https://github.com/VectorSpaceLab/OmniGen to foster future advancements.*

```python
import torch
from diffusers import OmniGenTransformer2DModel

transformer = OmniGenTransformer2DModel.from_pretrained("Shitao/OmniGen-v1-diffusers", subfolder="transformer", torch_dtype=torch.bfloat16)
```

## OmniGenTransformer2DModel

[[autodoc]] OmniGenTransformer2DModel
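As a rough sketch of how this transformer is consumed downstream, the snippet below passes it to `OmniGenPipeline` for a simple text-to-image call; the prompt and generation settings are illustrative assumptions, not values taken from this diff.

```python
# Illustrative sketch: using the OmniGen transformer through OmniGenPipeline.
# Prompt and generation settings are assumptions for demonstration purposes.
import torch
from diffusers import OmniGenPipeline, OmniGenTransformer2DModel

transformer = OmniGenTransformer2DModel.from_pretrained(
    "Shitao/OmniGen-v1-diffusers", subfolder="transformer", torch_dtype=torch.bfloat16
)
pipe = OmniGenPipeline.from_pretrained(
    "Shitao/OmniGen-v1-diffusers", transformer=transformer, torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()  # keeps peak VRAM usage lower

image = pipe(
    prompt="A portrait photo of a corgi wearing a tiny wizard hat",
    height=1024,
    width=1024,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("omnigen_example.png")
```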
@@ -29,3 +29,43 @@ Customized normalization layers for supporting various models in 🤗 Diffusers.

## AdaGroupNorm

[[autodoc]] models.normalization.AdaGroupNorm

## AdaLayerNormContinuous

[[autodoc]] models.normalization.AdaLayerNormContinuous

## RMSNorm

[[autodoc]] models.normalization.RMSNorm

## GlobalResponseNorm

[[autodoc]] models.normalization.GlobalResponseNorm


## LuminaLayerNormContinuous
[[autodoc]] models.normalization.LuminaLayerNormContinuous

## SD35AdaLayerNormZeroX
[[autodoc]] models.normalization.SD35AdaLayerNormZeroX

## AdaLayerNormZeroSingle
[[autodoc]] models.normalization.AdaLayerNormZeroSingle

## LuminaRMSNormZero
[[autodoc]] models.normalization.LuminaRMSNormZero

## LpNorm
[[autodoc]] models.normalization.LpNorm

## CogView3PlusAdaLayerNormZeroTextImage
[[autodoc]] models.normalization.CogView3PlusAdaLayerNormZeroTextImage

## CogVideoXLayerNormZero
[[autodoc]] models.normalization.CogVideoXLayerNormZero

## MochiRMSNormZero
[[autodoc]] models.transformers.transformer_mochi.MochiRMSNormZero

## MochiRMSNorm
[[autodoc]] models.normalization.MochiRMSNorm
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.

# Text-to-Video Generation with AnimateDiff

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

## Overview

[AnimateDiff: Animate Your Personalized Text-to-Image Diffusion Models without Specific Tuning](https://arxiv.org/abs/2307.04725) by Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, Bo Dai.
@@ -15,6 +15,10 @@

# CogVideoX

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

[CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer](https://arxiv.org/abs/2408.06072) from Tsinghua University & ZhipuAI, by Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, Da Yin, Xiaotao Gu, Yuxuan Zhang, Weihan Wang, Yean Cheng, Ting Liu, Bin Xu, Yuxiao Dong, Jie Tang.

The abstract from the paper is:
34 docs/source/en/api/pipelines/cogview4.md Normal file
@@ -0,0 +1,34 @@

<!--Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->

# CogView4

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

This pipeline was contributed by [zRzRzRzRzRzRzR](https://github.com/zRzRzRzRzRzRzR). The original codebase can be found [here](https://huggingface.co/THUDM). The original weights can be found under [hf.co/THUDM](https://huggingface.co/THUDM).

## CogView4Pipeline

[[autodoc]] CogView4Pipeline
  - all
  - __call__

## CogView4PipelineOutput

[[autodoc]] pipelines.cogview4.pipeline_output.CogView4PipelineOutput
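Since the new page above does not yet carry a usage snippet, here is a minimal, illustrative text-to-image sketch with `CogView4Pipeline`. Only the `THUDM/CogView4-6B` checkpoint id comes from this diff; the prompt, resolution, and generation settings are assumptions chosen for demonstration.

```python
# Minimal illustrative sketch for CogView4Pipeline; generation settings are assumptions.
import torch
from diffusers import CogView4Pipeline

pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()  # optional: trade speed for lower VRAM usage

image = pipe(
    prompt="A vibrant cherry red sports car under a noon sun, studio lighting",
    height=1024,
    width=1024,
    num_inference_steps=50,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("cogview4_example.png")
```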
@@ -15,6 +15,10 @@

# ConsisID

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

[Identity-Preserving Text-to-Video Generation by Frequency Decomposition](https://arxiv.org/abs/2411.17440) from Peking University & University of Rochester & etc, by Shenghai Yuan, Jinfa Huang, Xianyi He, Yunyang Ge, Yujun Shi, Liuhan Chen, Jiebo Luo, Li Yuan.

The abstract from the paper is:
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.

# FluxControlInpaint

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

FluxControlInpaintPipeline is an implementation of inpainting for the Flux.1 Depth/Canny models. The pipeline takes an image and a mask as input and returns the inpainted image.

FLUX.1 Depth and Canny [dev] is a 12 billion parameter rectified flow transformer capable of generating an image based on a text description while following the structure of a given input image. **This is not a ControlNet model**.
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.

# ControlNet

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.

With a ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process.
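For readers who want to see the pattern that paragraph describes, here is a rough sketch of conditioning Stable Diffusion on a control image. The checkpoint ids and the pre-computed canny edge map are assumptions for illustration, not values from this diff.

```python
# Rough sketch of ControlNet conditioning; checkpoint ids and the control image are assumptions.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

# The control image (here a canny edge map, but it could be a depth map, pose, ...)
# carries the spatial structure the generation should follow.
control_image = load_image("path/to/canny_edge_map.png")  # placeholder path

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

image = pipe(
    "a renaissance portrait, highly detailed oil painting",
    image=control_image,
    num_inference_steps=30,
).images[0]
image.save("controlnet_example.png")
```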
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.

# ControlNet with Flux.1

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

FluxControlNetPipeline is an implementation of ControlNet for Flux.1.

ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.

# ControlNet with Stable Diffusion 3

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

StableDiffusion3ControlNetPipeline is an implementation of ControlNet for Stable Diffusion 3.

ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.

# ControlNet with Stable Diffusion XL

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.

With a ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process.
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.

# ControlNetUnion

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

ControlNetUnionModel is an implementation of ControlNet for Stable Diffusion XL.

The ControlNet model was introduced in [ControlNetPlus](https://github.com/xinsir6/ControlNetPlus) by xinsir6. It supports multiple conditioning inputs without increasing computation.
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.

# ControlNet-XS

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

ControlNet-XS was introduced in [ControlNet-XS](https://vislearn.github.io/ControlNet-XS/) by Denis Zavadski and Carsten Rother. It is based on the observation that the control model in the [original ControlNet](https://huggingface.co/papers/2302.05543) can be made much smaller and still produce good results.

Like the original ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process.
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.

# DeepFloyd IF

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

## Overview

DeepFloyd IF is a novel state-of-the-art open-source text-to-image model with a high degree of photorealism and language understanding.
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.

# Flux

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

Flux is a series of text-to-image generation models based on diffusion transformers. To know more about Flux, check out the original [blog post](https://blackforestlabs.ai/announcing-black-forest-labs/) by the creators of Flux, Black Forest Labs.

Original model checkpoints for Flux can be found [here](https://huggingface.co/black-forest-labs). Original inference code can be found [here](https://github.com/black-forest-labs/flux).
@@ -355,8 +359,74 @@ image.save('flux_ip_adapter_output.jpg')
<figcaption class="mt-2 text-sm text-center text-gray-500">IP-Adapter examples with prompt "wearing sunglasses"</figcaption>
</div>

## Optimize

## Running FP16 inference
Flux is a very large model and requires ~50GB of RAM/VRAM to load all the modeling components. Enable some of the optimizations below to lower the memory requirements.

### Group offloading

[Group offloading](../../optimization/memory#group-offloading) lowers VRAM usage by offloading groups of internal layers rather than the whole model or weights. You need to use [`~hooks.apply_group_offloading`] on all the model components of a pipeline. The `offload_type` parameter allows you to toggle between block and leaf-level offloading. Setting it to `leaf_level` offloads the lowest leaf-level parameters to the CPU instead of offloading at the module-level.

On CUDA devices that support asynchronous data streaming, set `use_stream=True` to overlap data transfer and computation to accelerate inference.

> [!TIP]
> It is possible to mix block and leaf-level offloading for different components in a pipeline.

```py
import torch
from diffusers import FluxPipeline
from diffusers.hooks import apply_group_offloading

model_id = "black-forest-labs/FLUX.1-dev"
dtype = torch.bfloat16
pipe = FluxPipeline.from_pretrained(
    model_id,
    torch_dtype=dtype,
)

apply_group_offloading(
    pipe.transformer,
    offload_type="leaf_level",
    offload_device=torch.device("cpu"),
    onload_device=torch.device("cuda"),
    use_stream=True,
)
apply_group_offloading(
    pipe.text_encoder,
    offload_device=torch.device("cpu"),
    onload_device=torch.device("cuda"),
    offload_type="leaf_level",
    use_stream=True,
)
apply_group_offloading(
    pipe.text_encoder_2,
    offload_device=torch.device("cpu"),
    onload_device=torch.device("cuda"),
    offload_type="leaf_level",
    use_stream=True,
)
apply_group_offloading(
    pipe.vae,
    offload_device=torch.device("cpu"),
    onload_device=torch.device("cuda"),
    offload_type="leaf_level",
    use_stream=True,
)

prompt="A cat wearing sunglasses and working as a lifeguard at pool."

generator = torch.Generator().manual_seed(181201)
image = pipe(
    prompt,
    width=576,
    height=1024,
    num_inference_steps=30,
    generator=generator
).images[0]
image
```

### Running FP16 inference

Flux can generate high-quality images with FP16 (i.e. to accelerate inference on Turing/Volta GPUs) but produces different outputs compared to FP32/BF16. The issue is that some activations in the text encoders have to be clipped when running in FP16, which affects the overall image. Forcing text encoders to run with FP32 inference thus removes this output difference. See [here](https://github.com/huggingface/diffusers/pull/9097#issuecomment-2272292516) for details.

@@ -385,7 +455,7 @@ out = pipe(
out.save("image.png")
```

## Quantization
### Quantization

Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have varying impact on video quality depending on the video model.
@@ -14,6 +14,10 @@

# HunyuanVideo

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

[HunyuanVideo](https://www.arxiv.org/abs/2412.03603) by Tencent.

*Recent advancements in video generation have significantly impacted daily life for both individuals and industries. However, the leading video generation models remain closed-source, resulting in a notable performance gap between industry capabilities and those available to the public. In this report, we introduce HunyuanVideo, an innovative open-source video foundation model that demonstrates performance in video generation comparable to, or even surpassing, that of leading closed-source models. HunyuanVideo encompasses a comprehensive framework that integrates several key elements, including data curation, advanced architectural design, progressive model scaling and training, and an efficient infrastructure tailored for large-scale model training and inference. As a result, we successfully trained a video generative model with over 13 billion parameters, making it the largest among all open-source models. We conducted extensive experiments and implemented a series of targeted designs to ensure high visual quality, motion dynamics, text-video alignment, and advanced filming techniques. According to evaluations by professionals, HunyuanVideo outperforms previous state-of-the-art models, including Runway Gen-3, Luma 1.6, and three top-performing Chinese video generative models. By releasing the code for the foundation model and its applications, we aim to bridge the gap between closed-source and open-source communities. This initiative will empower individuals within the community to experiment with their ideas, fostering a more dynamic and vibrant video generation ecosystem. The code is publicly available at [this https URL](https://github.com/tencent/HunyuanVideo).*
@@ -32,6 +36,21 @@ Recommendations for inference:
- For smaller resolution videos, try lower values of `shift` (between `2.0` and `5.0`) in the [Scheduler](https://huggingface.co/docs/diffusers/main/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler.shift). For larger resolution images, try higher values (between `7.0` and `12.0`). The default value is `7.0` for HunyuanVideo.
- For more information about supported resolutions and other details, please refer to the original repository [here](https://github.com/Tencent/HunyuanVideo/).

## Available models

The following models are available for the [`HunyuanVideoPipeline`](text-to-video) pipeline:

| Model name | Description |
|:---|:---|
| [`hunyuanvideo-community/HunyuanVideo`](https://huggingface.co/hunyuanvideo-community/HunyuanVideo) | Official HunyuanVideo (guidance-distilled). Performs best at multiple resolutions and frames. Performs best with `guidance_scale=6.0`, `true_cfg_scale=1.0` and without a negative prompt. |
| [`https://huggingface.co/Skywork/SkyReels-V1-Hunyuan-T2V`](https://huggingface.co/Skywork/SkyReels-V1-Hunyuan-T2V) | Skywork's custom finetune of HunyuanVideo (de-distilled). Performs best with `97x544x960` resolution, `guidance_scale=1.0`, `true_cfg_scale=6.0` and a negative prompt. |

The following models are available for the image-to-video pipeline:

| Model name | Description |
|:---|:---|
| [`https://huggingface.co/Skywork/SkyReels-V1-Hunyuan-I2V`](https://huggingface.co/Skywork/SkyReels-V1-Hunyuan-I2V) | Skywork's custom finetune of HunyuanVideo (de-distilled). Performs best at `97x544x960` resolution, `guidance_scale=1.0`, `true_cfg_scale=6.0` and a negative prompt. |

## Quantization

Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have varying impact on video quality depending on the video model.
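To ground the table above, a rough text-to-video sketch with the guidance-distilled community checkpoint might look like the following. The `guidance_scale=6.0` value follows the table; the resolution, frame count, and prompt are illustrative assumptions kept small for memory reasons.

```python
# Rough sketch for the hunyuanvideo-community/HunyuanVideo checkpoint from the table above.
# guidance_scale=6.0 follows the table; resolution/frames/prompt are illustrative assumptions.
import torch
from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel
from diffusers.utils import export_to_video

model_id = "hunyuanvideo-community/HunyuanVideo"
transformer = HunyuanVideoTransformer3DModel.from_pretrained(
    model_id, subfolder="transformer", torch_dtype=torch.bfloat16
)
pipe = HunyuanVideoPipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=torch.float16)
pipe.vae.enable_tiling()         # lowers VRAM needed for the VAE decode
pipe.enable_model_cpu_offload()  # lowers VRAM needed for the whole pipeline

output = pipe(
    prompt="A cat walks on the grass, realistic style",
    height=320,
    width=512,
    num_frames=61,
    num_inference_steps=30,
    guidance_scale=6.0,
).frames[0]
export_to_video(output, "hunyuanvideo_example.mp4", fps=15)
```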
@@ -9,6 +9,10 @@ specific language governing permissions and limitations under the License.

# Kandinsky 3

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

Kandinsky 3 is created by [Vladimir Arkhipkin](https://github.com/oriBetelgeuse), [Anastasia Maltseva](https://github.com/NastyaMittseva), [Igor Pavlov](https://github.com/boomb0om), [Andrei Filatov](https://github.com/anvilarth), [Arseniy Shakhmatov](https://github.com/cene555), [Andrey Kuznetsov](https://github.com/kuznetsoffandrey), [Denis Dimitrov](https://github.com/denndimitrov), and [Zein Shaheen](https://github.com/zeinsh).

The description from its GitHub page:
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.

# Kolors: Effective Training of Diffusion Model for Photorealistic Text-to-Image Synthesis

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>



Kolors is a large-scale text-to-image generation model based on latent diffusion, developed by [the Kuaishou Kolors team](https://github.com/Kwai-Kolors/Kolors). Trained on billions of text-image pairs, Kolors exhibits significant advantages over both open-source and closed-source models in visual quality, complex semantic accuracy, and text rendering for both Chinese and English characters. Furthermore, Kolors supports both Chinese and English inputs, demonstrating strong performance in understanding and generating Chinese-specific content. For more details, please refer to this [technical report](https://github.com/Kwai-Kolors/Kolors/blob/master/imgs/Kolors_paper.pdf).
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.

# Latent Consistency Models

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

Latent Consistency Models (LCMs) were proposed in [Latent Consistency Models: Synthesizing High-Resolution Images with Few-Step Inference](https://huggingface.co/papers/2310.04378) by Simian Luo, Yiqin Tan, Longbo Huang, Jian Li, and Hang Zhao.

The abstract of the paper is as follows:
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.

# LEDITS++

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

LEDITS++ was proposed in [LEDITS++: Limitless Image Editing using Text-to-Image Models](https://huggingface.co/papers/2311.16711) by Manuel Brack, Felix Friedrich, Katharina Kornmeier, Linoy Tsaban, Patrick Schramowski, Kristian Kersting, Apolinário Passos.

The abstract from the paper is:
@@ -14,6 +14,10 @@

# LTX Video

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

[LTX Video](https://huggingface.co/Lightricks/LTX-Video) is the first DiT-based video generation model capable of generating high-quality videos in real time. It produces 24 FPS videos at a 768x512 resolution faster than they can be watched. Trained on a large-scale dataset of diverse videos, the model generates high-resolution videos with realistic and varied content. We provide a model for both text-to-video as well as image + text-to-video use cases.

<Tip>
87
docs/source/en/api/pipelines/lumina2.md
Normal file
87
docs/source/en/api/pipelines/lumina2.md
Normal file
@@ -0,0 +1,87 @@
|
||||
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License. -->
|
||||
|
||||
# Lumina2
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
[Lumina Image 2.0: A Unified and Efficient Image Generative Model](https://huggingface.co/Alpha-VLLM/Lumina-Image-2.0) is a 2 billion parameter flow-based diffusion transformer capable of generating diverse images from text descriptions.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*We introduce Lumina-Image 2.0, an advanced text-to-image model that surpasses previous state-of-the-art methods across multiple benchmarks, while also shedding light on its potential to evolve into a generalist vision intelligence model. Lumina-Image 2.0 exhibits three key properties: (1) Unification – it adopts a unified architecture that treats text and image tokens as a joint sequence, enabling natural cross-modal interactions and facilitating task expansion. Besides, since high-quality captioners can provide semantically better-aligned text-image training pairs, we introduce a unified captioning system, UniCaptioner, which generates comprehensive and precise captions for the model. This not only accelerates model convergence but also enhances prompt adherence, variable-length prompt handling, and task generalization via prompt templates. (2) Efficiency – to improve the efficiency of the unified architecture, we develop a set of optimization techniques that improve semantic learning and fine-grained texture generation during training while incorporating inference-time acceleration strategies without compromising image quality. (3) Transparency – we open-source all training details, code, and models to ensure full reproducibility, aiming to bridge the gap between well-resourced closed-source research teams and independent developers.*
|
||||
|
||||
<Tip>
|
||||
|
||||
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Using Single File loading with Lumina Image 2.0
|
||||
|
||||
Single file loading for Lumina Image 2.0 is available for the `Lumina2Transformer2DModel`.
|
||||
|
||||
```python
import torch
from diffusers import Lumina2Transformer2DModel, Lumina2Text2ImgPipeline

ckpt_path = "https://huggingface.co/Alpha-VLLM/Lumina-Image-2.0/blob/main/consolidated.00-of-01.pth"
transformer = Lumina2Transformer2DModel.from_single_file(
    ckpt_path, torch_dtype=torch.bfloat16
)

pipe = Lumina2Text2ImgPipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0", transformer=transformer, torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()
image = pipe(
    "a cat holding a sign that says hello",
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("lumina-single-file.png")
```
|
||||
|
||||
## Using GGUF Quantized Checkpoints with Lumina Image 2.0
|
||||
|
||||
GGUF-quantized checkpoints for the `Lumina2Transformer2DModel` can be loaded via `from_single_file` with the `GGUFQuantizationConfig`.
|
||||
|
||||
```python
import torch
from diffusers import Lumina2Transformer2DModel, Lumina2Text2ImgPipeline, GGUFQuantizationConfig

ckpt_path = "https://huggingface.co/calcuis/lumina-gguf/blob/main/lumina2-q4_0.gguf"
transformer = Lumina2Transformer2DModel.from_single_file(
    ckpt_path,
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)

pipe = Lumina2Text2ImgPipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0", transformer=transformer, torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()
image = pipe(
    "a cat holding a sign that says hello",
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("lumina-gguf.png")
```
|
||||
|
||||
## Lumina2Text2ImgPipeline
|
||||
|
||||
[[autodoc]] Lumina2Text2ImgPipeline
|
||||
- all
|
||||
- __call__
|
||||
@@ -1,4 +1,6 @@
|
||||
<!--Copyright 2024 Marigold authors and The HuggingFace Team. All rights reserved.
|
||||
<!--
|
||||
Copyright 2023-2025 Marigold Team, ETH Zürich. All rights reserved.
|
||||
Copyright 2024-2025 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
@@ -10,67 +12,120 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Marigold Pipelines for Computer Vision Tasks
|
||||
# Marigold Computer Vision
|
||||
|
||||

|
||||
|
||||
Marigold was proposed in [Repurposing Diffusion-Based Image Generators for Monocular Depth Estimation](https://huggingface.co/papers/2312.02145), a CVPR 2024 Oral paper by [Bingxin Ke](http://www.kebingxin.com/), [Anton Obukhov](https://www.obukhov.ai/), [Shengyu Huang](https://shengyuh.github.io/), [Nando Metzger](https://nandometzger.github.io/), [Rodrigo Caye Daudt](https://rcdaudt.github.io/), and [Konrad Schindler](https://scholar.google.com/citations?user=FZuNgqIAAAAJ&hl=en).
|
||||
The idea is to repurpose the rich generative prior of Text-to-Image Latent Diffusion Models (LDMs) for traditional computer vision tasks.
|
||||
Initially, this idea was explored to fine-tune Stable Diffusion for Monocular Depth Estimation, as shown in the teaser above.
|
||||
Later,
|
||||
- [Tianfu Wang](https://tianfwang.github.io/) trained the first Latent Consistency Model (LCM) of Marigold, which unlocked fast single-step inference;
|
||||
- [Kevin Qu](https://www.linkedin.com/in/kevin-qu-b3417621b/?locale=en_US) extended the approach to Surface Normals Estimation;
|
||||
- [Anton Obukhov](https://www.obukhov.ai/) contributed the pipelines and documentation into diffusers (enabled and supported by [YiYi Xu](https://yiyixuxu.github.io/) and [Sayak Paul](https://sayak.dev/)).
|
||||
Marigold was proposed in
|
||||
[Repurposing Diffusion-Based Image Generators for Monocular Depth Estimation](https://huggingface.co/papers/2312.02145),
|
||||
a CVPR 2024 Oral paper by
|
||||
[Bingxin Ke](http://www.kebingxin.com/),
|
||||
[Anton Obukhov](https://www.obukhov.ai/),
|
||||
[Shengyu Huang](https://shengyuh.github.io/),
|
||||
[Nando Metzger](https://nandometzger.github.io/),
|
||||
[Rodrigo Caye Daudt](https://rcdaudt.github.io/), and
|
||||
[Konrad Schindler](https://scholar.google.com/citations?user=FZuNgqIAAAAJ&hl=en).
|
||||
The core idea is to **repurpose the generative prior of Text-to-Image Latent Diffusion Models (LDMs) for traditional
|
||||
computer vision tasks**.
|
||||
This approach was explored by fine-tuning Stable Diffusion for **Monocular Depth Estimation**, as demonstrated in the
|
||||
teaser above.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*Monocular depth estimation is a fundamental computer vision task. Recovering 3D depth from a single image is geometrically ill-posed and requires scene understanding, so it is not surprising that the rise of deep learning has led to a breakthrough. The impressive progress of monocular depth estimators has mirrored the growth in model capacity, from relatively modest CNNs to large Transformer architectures. Still, monocular depth estimators tend to struggle when presented with images with unfamiliar content and layout, since their knowledge of the visual world is restricted by the data seen during training, and challenged by zero-shot generalization to new domains. This motivates us to explore whether the extensive priors captured in recent generative diffusion models can enable better, more generalizable depth estimation. We introduce Marigold, a method for affine-invariant monocular depth estimation that is derived from Stable Diffusion and retains its rich prior knowledge. The estimator can be fine-tuned in a couple of days on a single GPU using only synthetic training data. It delivers state-of-the-art performance across a wide range of datasets, including over 20% performance gains in specific cases. Project page: https://marigoldmonodepth.github.io.*
|
||||
|
||||
## Available Pipelines
|
||||
|
||||
Each pipeline supports one Computer Vision task, which takes an input RGB image as input and produces a *prediction* of the modality of interest, such as a depth map of the input image.
|
||||
Currently, the following tasks are implemented:
|
||||
|
||||
| Pipeline | Predicted Modalities | Demos |
|
||||
|---------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------:|
|
||||
| [MarigoldDepthPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py) | [Depth](https://en.wikipedia.org/wiki/Depth_map), [Disparity](https://en.wikipedia.org/wiki/Binocular_disparity) | [Fast Demo (LCM)](https://huggingface.co/spaces/prs-eth/marigold-lcm), [Slow Original Demo (DDIM)](https://huggingface.co/spaces/prs-eth/marigold) |
|
||||
| [MarigoldNormalsPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py) | [Surface normals](https://en.wikipedia.org/wiki/Normal_mapping) | [Fast Demo (LCM)](https://huggingface.co/spaces/prs-eth/marigold-normals-lcm) |
|
||||
|
||||
|
||||
## Available Checkpoints
|
||||
|
||||
The original checkpoints can be found under the [PRS-ETH](https://huggingface.co/prs-eth/) Hugging Face organization.
|
||||
Marigold was later extended in the follow-up paper,
|
||||
[Marigold: Affordable Adaptation of Diffusion-Based Image Generators for Image Analysis](https://huggingface.co/papers/2312.02145),
|
||||
authored by
|
||||
[Bingxin Ke](http://www.kebingxin.com/),
|
||||
[Kevin Qu](https://www.linkedin.com/in/kevin-qu-b3417621b/?locale=en_US),
|
||||
[Tianfu Wang](https://tianfwang.github.io/),
|
||||
[Nando Metzger](https://nandometzger.github.io/),
|
||||
[Shengyu Huang](https://shengyuh.github.io/),
|
||||
[Bo Li](https://www.linkedin.com/in/bobboli0202/),
|
||||
[Anton Obukhov](https://www.obukhov.ai/), and
|
||||
[Konrad Schindler](https://scholar.google.com/citations?user=FZuNgqIAAAAJ&hl=en).
|
||||
This work expanded Marigold to support new modalities such as **Surface Normals** and **Intrinsic Image Decomposition**
|
||||
(IID), introduced a training protocol for **Latent Consistency Models** (LCM), and demonstrated **High-Resolution** (HR)
|
||||
processing capability.
|
||||
|
||||
<Tip>
|
||||
|
||||
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. Also, to know more about reducing the memory usage of this pipeline, refer to the ["Reduce memory usage"] section [here](../../using-diffusers/svd#reduce-memory-usage).
|
||||
The early Marigold models (`v1-0` and earlier) were optimized for best results with at least 10 inference steps.
|
||||
LCM models were later developed to enable high-quality inference in just 1 to 4 steps.
|
||||
Marigold models `v1-1` and later use the DDIM scheduler to achieve optimal
|
||||
results in as few as 1 to 4 steps.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Available Pipelines
|
||||
|
||||
Each pipeline is tailored for a specific computer vision task, processing an input RGB image and generating a
|
||||
corresponding prediction.
|
||||
Currently, the following computer vision tasks are implemented:
|
||||
|
||||
| Pipeline | Recommended Model Checkpoints | Spaces (Interactive Apps) | Predicted Modalities |
|
||||
|---------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [MarigoldDepthPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py) | [prs-eth/marigold-depth-v1-1](https://huggingface.co/prs-eth/marigold-depth-v1-1) | [Depth Estimation](https://huggingface.co/spaces/prs-eth/marigold) | [Depth](https://en.wikipedia.org/wiki/Depth_map), [Disparity](https://en.wikipedia.org/wiki/Binocular_disparity) |
|
||||
| [MarigoldNormalsPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py) | [prs-eth/marigold-normals-v1-1](https://huggingface.co/prs-eth/marigold-normals-v1-1) | [Surface Normals Estimation](https://huggingface.co/spaces/prs-eth/marigold-normals) | [Surface normals](https://en.wikipedia.org/wiki/Normal_mapping) |
|
||||
| [MarigoldIntrinsicsPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_intrinsics.py) | [prs-eth/marigold-iid-appearance-v1-1](https://huggingface.co/prs-eth/marigold-iid-appearance-v1-1),<br>[prs-eth/marigold-iid-lighting-v1-1](https://huggingface.co/prs-eth/marigold-iid-lighting-v1-1) | [Intrinsic Image Decomposition](https://huggingface.co/spaces/prs-eth/marigold-iid) | [Albedo](https://en.wikipedia.org/wiki/Albedo), [Materials](https://www.n.aiq3d.com/wiki/roughnessmetalnessao-map), [Lighting](https://en.wikipedia.org/wiki/Diffuse_reflection) |
|
||||
|
||||
## Available Checkpoints
|
||||
|
||||
All original checkpoints are available under the [PRS-ETH](https://huggingface.co/prs-eth/) organization on Hugging Face.
|
||||
They are designed for use with diffusers pipelines and the [original codebase](https://github.com/prs-eth/marigold), which can also be used to train
|
||||
new model checkpoints.
|
||||
The following is a summary of the recommended checkpoints, all of which produce reliable results with 1 to 4 steps.
|
||||
|
||||
| Checkpoint | Modality | Comment |
|
||||
|-----------------------------------------------------------------------------------------------------|--------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [prs-eth/marigold-depth-v1-1](https://huggingface.co/prs-eth/marigold-depth-v1-1) | Depth | Affine-invariant depth prediction assigns each pixel a value between 0 (near plane) and 1 (far plane), with both planes determined by the model during inference. |
|
||||
| [prs-eth/marigold-normals-v0-1](https://huggingface.co/prs-eth/marigold-normals-v0-1) | Normals | The surface normals predictions are unit-length 3D vectors in the screen space camera, with values in the range from -1 to 1. |
|
||||
| [prs-eth/marigold-iid-appearance-v1-1](https://huggingface.co/prs-eth/marigold-iid-appearance-v1-1) | Intrinsics | InteriorVerse decomposition is comprised of Albedo and two BRDF material properties: Roughness and Metallicity. |
|
||||
| [prs-eth/marigold-iid-lighting-v1-1](https://huggingface.co/prs-eth/marigold-iid-lighting-v1-1) | Intrinsics | HyperSim decomposition of an image  \\(I\\)  is comprised of Albedo  \\(A\\), Diffuse shading  \\(S\\), and Non-diffuse residual  \\(R\\):  \\(I = A*S+R\\). |
|
||||
|
||||
<Tip>
|
||||
|
||||
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff
|
||||
between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to
|
||||
efficiently load the same components into multiple pipelines.
|
||||
Also, to learn more about reducing the memory usage of this pipeline, refer to the
[Reduce memory usage](../../using-diffusers/svd#reduce-memory-usage) section.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Marigold pipelines were designed and tested only with `DDIMScheduler` and `LCMScheduler`.
|
||||
Depending on the scheduler, the number of inference steps required to get reliable predictions varies, and there is no universal value that works best across schedulers.
|
||||
Because of that, the default value of `num_inference_steps` in the `__call__` method of the pipeline is set to `None` (see the API reference).
|
||||
Unless set explicitly, its value will be taken from the checkpoint configuration `model_index.json`.
|
||||
This is done to ensure high-quality predictions when calling the pipeline with just the `image` argument.
|
||||
Marigold pipelines were designed and tested with the scheduler embedded in the model checkpoint.
|
||||
The optimal number of inference steps varies by scheduler, with no universal value that works best across all cases.
|
||||
To accommodate this, the `num_inference_steps` parameter in the pipeline's `__call__` method defaults to `None` (see the
|
||||
API reference).
|
||||
Unless set explicitly, it inherits the value from the `default_denoising_steps` field in the checkpoint configuration
|
||||
file (`model_index.json`).
|
||||
This ensures high-quality predictions when invoking the pipeline with only the `image` argument.
|
||||
|
||||
</Tip>
|
||||
|
||||
See also Marigold [usage examples](marigold_usage).
|
||||
See also Marigold [usage examples](../../using-diffusers/marigold_usage).
|
||||
|
||||
## Marigold Depth Prediction API
|
||||
|
||||
## MarigoldDepthPipeline
|
||||
[[autodoc]] MarigoldDepthPipeline
|
||||
- all
|
||||
- __call__
|
||||
|
||||
## MarigoldNormalsPipeline
|
||||
[[autodoc]] MarigoldNormalsPipeline
|
||||
- all
|
||||
- __call__
|
||||
|
||||
## MarigoldDepthOutput
|
||||
[[autodoc]] pipelines.marigold.pipeline_marigold_depth.MarigoldDepthOutput
|
||||
|
||||
## MarigoldNormalsOutput
|
||||
[[autodoc]] pipelines.marigold.pipeline_marigold_normals.MarigoldNormalsOutput
|
||||
[[autodoc]] pipelines.marigold.marigold_image_processing.MarigoldImageProcessor.visualize_depth
|
||||
|
||||
## Marigold Normals Estimation API
|
||||
[[autodoc]] MarigoldNormalsPipeline
|
||||
- __call__
|
||||
|
||||
[[autodoc]] pipelines.marigold.pipeline_marigold_normals.MarigoldNormalsOutput
|
||||
|
||||
[[autodoc]] pipelines.marigold.marigold_image_processing.MarigoldImageProcessor.visualize_normals
|
||||
|
||||
## Marigold Intrinsic Image Decomposition API
|
||||
|
||||
[[autodoc]] MarigoldIntrinsicsPipeline
|
||||
- __call__
|
||||
|
||||
[[autodoc]] pipelines.marigold.pipeline_marigold_intrinsics.MarigoldIntrinsicsOutput
|
||||
|
||||
[[autodoc]] pipelines.marigold.marigold_image_processing.MarigoldImageProcessor.visualize_intrinsics
|
||||
|
||||
@@ -15,6 +15,10 @@
|
||||
|
||||
# Mochi 1 Preview
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
> [!TIP]
|
||||
> Only a research preview of the model weights is available at the moment.
|
||||
|
||||
|
||||
80
docs/source/en/api/pipelines/omnigen.md
Normal file
@@ -0,0 +1,80 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
-->
|
||||
|
||||
# OmniGen
|
||||
|
||||
[OmniGen: Unified Image Generation](https://arxiv.org/pdf/2409.11340) from BAAI, by Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Chaofan Li, Shuting Wang, Tiejun Huang, Zheng Liu.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*The emergence of Large Language Models (LLMs) has unified language generation tasks and revolutionized human-machine interaction. However, in the realm of image generation, a unified model capable of handling various tasks within a single framework remains largely unexplored. In this work, we introduce OmniGen, a new diffusion model for unified image generation. OmniGen is characterized by the following features: 1) Unification: OmniGen not only demonstrates text-to-image generation capabilities but also inherently supports various downstream tasks, such as image editing, subject-driven generation, and visual conditional generation. 2) Simplicity: The architecture of OmniGen is highly simplified, eliminating the need for additional plugins. Moreover, compared to existing diffusion models, it is more user-friendly and can complete complex tasks end-to-end through instructions without the need for extra intermediate steps, greatly simplifying the image generation workflow. 3) Knowledge Transfer: Benefit from learning in a unified format, OmniGen effectively transfers knowledge across different tasks, manages unseen tasks and domains, and exhibits novel capabilities. We also explore the model’s reasoning capabilities and potential applications of the chain-of-thought mechanism. This work represents the first attempt at a general-purpose image generation model, and we will release our resources at https://github.com/VectorSpaceLab/OmniGen to foster future advancements.*
|
||||
|
||||
<Tip>
|
||||
|
||||
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers.md) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading.md#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
|
||||
|
||||
</Tip>
|
||||
|
||||
This pipeline was contributed by [staoxiao](https://github.com/staoxiao). The original codebase can be found [here](https://github.com/VectorSpaceLab/OmniGen). The original weights can be found under [hf.co/shitao](https://huggingface.co/Shitao/OmniGen-v1).
|
||||
|
||||
## Inference
|
||||
|
||||
First, load the pipeline:
|
||||
|
||||
```python
import torch
from diffusers import OmniGenPipeline

pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1-diffusers", torch_dtype=torch.bfloat16)
pipe.to("cuda")
```
|
||||
|
||||
For text-to-image, pass a text prompt. By default, OmniGen generates a 1024x1024 image.
|
||||
You can set the `height` and `width` parameters to generate images of a different size.
|
||||
|
||||
```python
prompt = "Realistic photo. A young woman sits on a sofa, holding a book and facing the camera. She wears delicate silver hoop earrings adorned with tiny, sparkling diamonds that catch the light, with her long chestnut hair cascading over her shoulders. Her eyes are focused and gentle, framed by long, dark lashes. She is dressed in a cozy cream sweater, which complements her warm, inviting smile. Behind her, there is a table with a cup of water in a sleek, minimalist blue mug. The background is a serene indoor setting with soft natural light filtering through a window, adorned with tasteful art and flowers, creating a cozy and peaceful ambiance. 4K, HD."
image = pipe(
    prompt=prompt,
    height=1024,
    width=1024,
    guidance_scale=3,
    generator=torch.Generator(device="cpu").manual_seed(111),
).images[0]
image.save("output.png")
```
|
||||
|
||||
OmniGen supports multimodal inputs.
|
||||
When the input includes an image, you need to add a placeholder `<img><|image_1|></img>` in the text prompt to represent the image.
|
||||
It is recommended to enable `use_input_image_size_as_output` to keep the edited image the same size as the original image.
|
||||
|
||||
```python
|
||||
prompt="<img><|image_1|></img> Remove the woman's earrings. Replace the mug with a clear glass filled with sparkling iced cola."
|
||||
input_images=[load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/t2i_woman_with_book.png")]
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
guidance_scale=2,
|
||||
img_guidance_scale=1.6,
|
||||
use_input_image_size_as_output=True,
|
||||
generator=torch.Generator(device="cpu").manual_seed(222)).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
## OmniGenPipeline
|
||||
|
||||
[[autodoc]] OmniGenPipeline
|
||||
- all
|
||||
- __call__
|
||||
@@ -54,7 +54,7 @@ The table below lists all the pipelines currently available in 🤗 Diffusers an
|
||||
| [DiT](dit) | text2image |
|
||||
| [Flux](flux) | text2image |
|
||||
| [Hunyuan-DiT](hunyuandit) | text2image |
|
||||
| [I2VGen-XL](i2vgenxl) | text2video |
|
||||
| [I2VGen-XL](i2vgenxl) | image2video |
|
||||
| [InstructPix2Pix](pix2pix) | image editing |
|
||||
| [Kandinsky 2.1](kandinsky) | text2image, image2image, inpainting, interpolation |
|
||||
| [Kandinsky 2.2](kandinsky_v22) | text2image, image2image, inpainting |
|
||||
@@ -65,7 +65,7 @@ The table below lists all the pipelines currently available in 🤗 Diffusers an
|
||||
| [Latte](latte) | text2image |
|
||||
| [LEDITS++](ledits_pp) | image editing |
|
||||
| [Lumina-T2X](lumina) | text2image |
|
||||
| [Marigold](marigold) | depth |
|
||||
| [Marigold](marigold) | depth-estimation, normals-estimation, intrinsic-decomposition |
|
||||
| [MultiDiffusion](panorama) | text2image |
|
||||
| [MusicLDM](musicldm) | text2audio |
|
||||
| [PAG](pag) | text2image |
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Perturbed-Attention Guidance
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
[Perturbed-Attention Guidance (PAG)](https://ku-cvlab.github.io/Perturbed-Attention-Guidance/) is a new diffusion sampling guidance that improves sample quality across both unconditional and conditional settings, achieving this without requiring further training or the integration of external modules.
|
||||
|
||||
PAG was introduced in [Self-Rectifying Diffusion Sampling with Perturbed-Attention Guidance](https://huggingface.co/papers/2403.17377) by Donghoon Ahn, Hyoungwon Cho, Jaewon Min, Wooseok Jang, Jungwoo Kim, SeonHwa Kim, Hyun Hee Park, Kyong Hwan Jin and Seungryong Kim.
|
||||
|
||||
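In Diffusers, PAG is typically enabled through the `AutoPipeline` API. The sketch below assumes an SDXL checkpoint and uses illustrative guidance values.

```python
import torch
from diffusers import AutoPipelineForText2Image

# enable_pag=True loads the PAG-enabled variant of the pipeline.
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    enable_pag=True,
    torch_dtype=torch.float16,
).to("cuda")

# pag_scale controls the strength of perturbed-attention guidance.
image = pipe(
    "an insect robot preparing a delicious meal, anime style",
    guidance_scale=7.0,
    pag_scale=3.0,
).images[0]
image.save("pag-example.png")
```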
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# MultiDiffusion
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
[MultiDiffusion: Fusing Diffusion Paths for Controlled Image Generation](https://huggingface.co/papers/2302.08113) is by Omer Bar-Tal, Lior Yariv, Yaron Lipman, and Tali Dekel.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Image-to-Video Generation with PIA (Personalized Image Animator)
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
## Overview
|
||||
|
||||
[PIA: Your Personalized Image Animator via Plug-and-Play Modules in Text-to-Image Models](https://arxiv.org/abs/2312.13964) is by Yiming Zhang, Zhening Xing, Yanhong Zeng, Youqing Fang, and Kai Chen.
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# InstructPix2Pix
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
[InstructPix2Pix: Learning to Follow Image Editing Instructions](https://huggingface.co/papers/2211.09800) is by Tim Brooks, Aleksander Holynski and Alexei A. Efros.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -14,6 +14,10 @@
|
||||
|
||||
# SanaPipeline
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
[SANA: Efficient High-Resolution Image Synthesis with Linear Diffusion Transformers](https://huggingface.co/papers/2410.10629) from NVIDIA and MIT HAN Lab, by Enze Xie, Junsong Chen, Junyu Chen, Han Cai, Haotian Tang, Yujun Lin, Zhekai Zhang, Muyang Li, Ligeng Zhu, Yao Lu, Song Han.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Depth-to-image
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
The Stable Diffusion model can also infer depth based on an image using [MiDaS](https://github.com/isl-org/MiDaS). This allows you to pass a text prompt and an initial image to condition the generation of new images as well as a `depth_map` to preserve the image structure.
|
||||
|
||||
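A minimal sketch of this workflow is shown below; the checkpoint and input image are illustrative. When no `depth_map` is passed, the pipeline estimates one from the initial image.

```python
import torch
from diffusers import StableDiffusionDepth2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("http://images.cocodataset.org/val2017/000000039769.jpg")

# Without an explicit depth_map, a depth map is estimated from the init image.
image = pipe(
    prompt="two tigers",
    image=init_image,
    negative_prompt="bad, deformed, ugly",
    strength=0.7,
).images[0]
image.save("depth2img.png")
```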
<Tip>
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Image-to-image
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
The Stable Diffusion model can also be applied to image-to-image generation by passing a text prompt and an initial image to condition the generation of new images.
|
||||
|
||||
The [`StableDiffusionImg2ImgPipeline`] uses the diffusion-denoising mechanism proposed in [SDEdit: Guided Image Synthesis and Editing with Stochastic Differential Equations](https://huggingface.co/papers/2108.01073) by Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jiajun Wu, Jun-Yan Zhu, Stefano Ermon.
|
||||
|
||||
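A minimal sketch is shown below; the checkpoint, input image, and `strength` value are illustrative.

```python
import torch
from diffusers import StableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
)

# strength controls how much the init image is noised; higher values deviate more from it.
image = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
).images[0]
image.save("img2img.png")
```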
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Inpainting
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
The Stable Diffusion model can also be applied to inpainting, which lets you edit specific parts of an image by providing a mask and a text prompt.
|
||||
|
||||
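A minimal sketch is shown below; the checkpoint and the example image/mask URLs are illustrative. White mask pixels are repainted while black pixels are preserved.

```python
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")

image = load_image(
    "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
)
mask_image = load_image(
    "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
)

result = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=image,
    mask_image=mask_image,
).images[0]
result.save("inpainting.png")
```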
## Tips
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Text-to-(RGB, depth)
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
LDM3D was proposed in [LDM3D: Latent Diffusion Model for 3D](https://huggingface.co/papers/2305.10853) by Gabriela Ben Melech Stan, Diana Wofk, Scottie Fox, Alex Redden, Will Saxton, Jean Yu, Estelle Aflalo, Shao-Yen Tseng, Fabio Nonato, Matthias Muller, and Vasudev Lal. Unlike existing text-to-image diffusion models such as [Stable Diffusion](./overview), which only generate an image, LDM3D generates both an image and a depth map from a given text prompt. With almost the same number of parameters, LDM3D learns a latent space that can compress both the RGB images and the depth maps.
|
||||
|
||||
Two checkpoints are available for use:
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Stable Diffusion pipelines
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
Stable Diffusion is a text-to-image latent diffusion model created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/) and [LAION](https://laion.ai/). Latent diffusion applies the diffusion process over a lower dimensional latent space to reduce memory and compute complexity. This specific type of diffusion model was proposed in [High-Resolution Image Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) by Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Björn Ommer.
|
||||
|
||||
Stable Diffusion is trained on 512x512 images from a subset of the LAION-5B dataset. This model uses a frozen CLIP ViT-L/14 text encoder to condition the model on text prompts. With its 860M UNet and 123M text encoder, the model is relatively lightweight and can run on consumer GPUs.
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Stable Diffusion 3
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
Stable Diffusion 3 (SD3) was proposed in [Scaling Rectified Flow Transformers for High-Resolution Image Synthesis](https://arxiv.org/pdf/2403.03206.pdf) by Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Muller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, Dustin Podell, Tim Dockhorn, Zion English, Kyle Lacey, Alex Goodwin, Yannik Marek, and Robin Rombach.
|
||||
|
||||
The abstract from the paper is:
|
||||
@@ -77,7 +81,7 @@ from diffusers import StableDiffusion3Pipeline
|
||||
from transformers import SiglipVisionModel, SiglipImageProcessor
|
||||
|
||||
image_encoder_id = "google/siglip-so400m-patch14-384"
|
||||
ip_adapter_id = "guiyrt/InstantX-SD3.5-Large-IP-Adapter-diffusers"
|
||||
ip_adapter_id = "InstantX/SD3.5-Large-IP-Adapter"
|
||||
|
||||
feature_extractor = SiglipImageProcessor.from_pretrained(
|
||||
image_encoder_id,
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Stable Diffusion XL
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
Stable Diffusion XL (SDXL) was proposed in [SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis](https://huggingface.co/papers/2307.01952) by Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Text-to-image
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
The Stable Diffusion model was created by researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), [Runway](https://github.com/runwayml), and [LAION](https://laion.ai/). The [`StableDiffusionPipeline`] is capable of generating photorealistic images given any text input. It's trained on 512x512 images from a subset of the LAION-5B dataset. This model uses a frozen CLIP ViT-L/14 text encoder to condition the model on text prompts. With its 860M UNet and 123M text encoder, the model is relatively lightweight and can run on consumer GPUs. Latent diffusion is the research on top of which Stable Diffusion was built. It was proposed in [High-Resolution Image Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) by Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Björn Ommer.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Super-resolution
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
The Stable Diffusion upscaler diffusion model was created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), and [LAION](https://laion.ai/). It is used to enhance the resolution of input images by a factor of 4.
|
||||
|
||||
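A minimal sketch is shown below; the checkpoint and low-resolution input are illustrative.

```python
import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")

# The upscaler works on small inputs and returns an image 4x larger.
low_res_img = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png"
).resize((128, 128))

upscaled = pipe(prompt="a white cat", image=low_res_img).images[0]
upscaled.save("upsampled_cat.png")
```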
<Tip>
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Stable unCLIP
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
Stable unCLIP checkpoints are finetuned from [Stable Diffusion 2.1](./stable_diffusion/stable_diffusion_2) checkpoints to condition on CLIP image embeddings.
|
||||
Stable unCLIP still conditions on text embeddings. Given the two separate conditionings, Stable unCLIP can be used
for text-guided image variation. When combined with an unCLIP prior, it can also be used for full text-to-image generation.
|
||||
|
||||
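A minimal image-variation sketch, assuming the `stabilityai/stable-diffusion-2-1-unclip` checkpoint; the input image URL is illustrative, and the text prompt is optional and only steers the variation.

```python
import torch
from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-unclip", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/tarsila_do_amaral.png"
)

# The prompt is optional; it nudges the variation toward the description.
image = pipe(init_image, prompt="A fantasy landscape, trending on artstation").images[0]
image.save("stable-unclip-variation.png")
```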
@@ -18,6 +18,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Text-to-video
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
[ModelScope Text-to-Video Technical Report](https://arxiv.org/abs/2308.06571) is by Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, Shiwei Zhang.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Text2Video-Zero
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
[Text2Video-Zero: Text-to-Image Diffusion Models are Zero-Shot Video Generators](https://huggingface.co/papers/2303.13439) is by Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, [Zhangyang Wang](https://www.ece.utexas.edu/people/faculty/atlas-wang), Shant Navasardyan, [Humphrey Shi](https://www.humphreyshi.com).
|
||||
|
||||
Text2Video-Zero enables zero-shot video generation using either:
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# UniDiffuser
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
The UniDiffuser model was proposed in [One Transformer Fits All Distributions in Multi-Modal Diffusion at Scale](https://huggingface.co/papers/2303.06555) by Fan Bao, Shen Nie, Kaiwen Xue, Chongxuan Li, Shi Pu, Yaole Wang, Gang Yue, Yue Cao, Hang Su, Jun Zhu.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Würstchen
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
<img src="https://github.com/dome272/Wuerstchen/assets/61938694/0617c863-165a-43ee-9303-2a17299a0cf9">
|
||||
|
||||
[Wuerstchen: An Efficient Architecture for Large-Scale Text-to-Image Diffusion Models](https://huggingface.co/papers/2306.00637) is by Pablo Pernias, Dominic Rampas, Mats L. Richter, Christopher Pal, and Marc Aubreville.
|
||||
|
||||
19
docs/source/en/api/schedulers/ddim_cogvideox.md
Normal file
@@ -0,0 +1,19 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# CogVideoXDDIMScheduler
|
||||
|
||||
`CogVideoXDDIMScheduler` is based on [Denoising Diffusion Implicit Models](https://huggingface.co/papers/2010.02502), specifically for CogVideoX models.
|
||||
|
||||
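A short sketch of swapping this scheduler into a CogVideoX pipeline is shown below; the `timestep_spacing="trailing"` setting mirrors the value commonly recommended for CogVideoX and should be treated as an assumption.

```python
import torch
from diffusers import CogVideoXPipeline, CogVideoXDDIMScheduler

pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)

# Rebuild the scheduler from the pipeline's existing config and swap it in.
pipe.scheduler = CogVideoXDDIMScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing"
)
```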
## CogVideoXDDIMScheduler
|
||||
|
||||
[[autodoc]] CogVideoXDDIMScheduler
|
||||
@@ -0,0 +1,19 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# CogVideoXDPMScheduler
|
||||
|
||||
`CogVideoXDPMScheduler` is based on [DPM-Solver: A Fast ODE Solver for Diffusion Probabilistic Model Sampling in Around 10 Steps](https://huggingface.co/papers/2206.00927) and [DPM-Solver++: Fast Solver for Guided Sampling of Diffusion Probabilistic Models](https://huggingface.co/papers/2211.01095), specifically for CogVideoX models.
|
||||
|
||||
## CogVideoXDPMScheduler
|
||||
|
||||
[[autodoc]] CogVideoXDPMScheduler
|
||||
@@ -45,3 +45,7 @@ Utility and helper functions for working with 🤗 Diffusers.
|
||||
## apply_layerwise_casting
|
||||
|
||||
[[autodoc]] hooks.layerwise_casting.apply_layerwise_casting
|
||||
|
||||
## apply_group_offloading
|
||||
|
||||
[[autodoc]] hooks.group_offloading.apply_group_offloading
|
||||
|
||||
@@ -158,6 +158,46 @@ In order to properly offload models after they're called, it is required to run
|
||||
|
||||
</Tip>
|
||||
|
||||
## Group offloading
|
||||
|
||||
Group offloading is the middle ground between sequential and model offloading. It works by offloading groups of internal layers (either `torch.nn.ModuleList` or `torch.nn.Sequential`), which uses less memory than model-level offloading. It is also faster than sequential-level offloading because the number of device synchronizations is reduced.
|
||||
|
||||
To enable group offloading, call the [`~ModelMixin.enable_group_offload`] method on the model if it is a Diffusers model implementation. For any other model implementation, use [`~hooks.group_offloading.apply_group_offloading`]:
|
||||
|
||||
```python
import torch
from diffusers import CogVideoXPipeline
from diffusers.hooks import apply_group_offloading
from diffusers.utils import export_to_video

# Load the pipeline
onload_device = torch.device("cuda")
offload_device = torch.device("cpu")
pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)

# We can utilize the enable_group_offload method for Diffusers model implementations
pipe.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type="leaf_level", use_stream=True)

# For any other model implementations, the apply_group_offloading function can be used
apply_group_offloading(pipe.text_encoder, onload_device=onload_device, offload_type="block_level", num_blocks_per_group=2)
apply_group_offloading(pipe.vae, onload_device=onload_device, offload_type="leaf_level")

prompt = (
    "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. "
    "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "
    "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "
    "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. "
    "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "
    "atmosphere of this unique musical performance."
)
video = pipe(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0]
# This utilized about 14.79 GB. It can be further reduced by using tiling and using leaf_level offloading throughout the pipeline.
print(f"Max memory allocated: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
export_to_video(video, "output.mp4", fps=8)
```
|
||||
|
||||
Group offloading (for CUDA devices with support for asynchronous data transfer streams) overlaps data transfer and computation to reduce the overall execution time compared to sequential offloading. This is enabled using layer prefetching with CUDA streams. The next layer to be executed is loaded onto the accelerator device while the current layer is being executed - this increases the memory requirements slightly. Group offloading also supports leaf-level offloading (equivalent to sequential CPU offloading) but can be made much faster when using streams.
|
||||
|
||||
## FP8 layerwise weight-casting
|
||||
|
||||
PyTorch supports `torch.float8_e4m3fn` and `torch.float8_e5m2` as weight storage dtypes, but they can't be used for computation in many different tensor operations due to unimplemented kernel support. However, you can use these dtypes to store model weights in fp8 precision and upcast them on-the-fly when the layers are used in the forward pass. This is known as layerwise weight-casting.
|
||||
|
||||
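A minimal sketch, assuming a Diffusers model that exposes `enable_layerwise_casting` (the CogVideoX checkpoint is reused here only for illustration):

```python
import torch
from diffusers import CogVideoXPipeline

pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)

# Store transformer weights in fp8 and upcast them to bfloat16 on the fly in the forward pass.
pipe.transformer.enable_layerwise_casting(
    storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16
)
pipe.to("cuda")
```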
@@ -339,7 +339,10 @@ import torch
|
||||
from huggingface_hub.repocard import RepoCard
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
pipeline = DiffusionPipeline.from_pretrained("sayakpaul/custom-diffusion-cat-wooden-pot", torch_dtype=torch.float16).to("cuda")
|
||||
pipeline = DiffusionPipeline.from_pretrained(
|
||||
"CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16,
|
||||
).to("cuda")
|
||||
model_id = "sayakpaul/custom-diffusion-cat-wooden-pot"
|
||||
pipeline.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
|
||||
pipeline.load_textual_inversion(model_id, weight_name="<new1>.bin")
|
||||
pipeline.load_textual_inversion(model_id, weight_name="<new2>.bin")
|
||||
|
||||
@@ -221,3 +221,7 @@ pipe.delete_adapters("toy")
|
||||
pipe.get_active_adapters()
|
||||
["pixel"]
|
||||
```
|
||||
|
||||
## PeftInputAutocastDisableHook
|
||||
|
||||
[[autodoc]] hooks.layerwise_casting.PeftInputAutocastDisableHook
|
||||
|
||||
@@ -461,12 +461,12 @@ Chain it to an upscaler pipeline to increase the image resolution:
|
||||
from diffusers import StableDiffusionLatentUpscalePipeline
|
||||
|
||||
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
|
||||
"stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
|
||||
"stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True
|
||||
)
|
||||
upscaler.enable_model_cpu_offload()
|
||||
upscaler.enable_xformers_memory_efficient_attention()
|
||||
|
||||
image_2 = upscaler(prompt, image=image_1, output_type="latent").images[0]
|
||||
image_2 = upscaler(prompt, image=image_1).images[0]
|
||||
```
|
||||
|
||||
Finally, chain it to a super-resolution pipeline to further enhance the resolution:
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
<!--Copyright 2024 Marigold authors and The HuggingFace Team. All rights reserved.
|
||||
<!--
|
||||
Copyright 2023-2025 Marigold Team, ETH Zürich. All rights reserved.
|
||||
Copyright 2024-2025 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
@@ -10,31 +12,38 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Marigold Pipelines for Computer Vision Tasks
|
||||
# Marigold Computer Vision
|
||||
|
||||
[Marigold](../api/pipelines/marigold) is a novel diffusion-based dense prediction approach, and a set of pipelines for various computer vision tasks, such as monocular depth estimation.
|
||||
**Marigold** is a diffusion-based [method](https://huggingface.co/papers/2312.02145) and a collection of [pipelines](../api/pipelines/marigold) designed for
|
||||
dense computer vision tasks, including **monocular depth prediction**, **surface normals estimation**, and **intrinsic
|
||||
image decomposition**.
|
||||
|
||||
This guide will show you how to use Marigold to obtain fast and high-quality predictions for images and videos.
|
||||
This guide will walk you through using Marigold to generate fast and high-quality predictions for images and videos.
|
||||
|
||||
Each pipeline supports one Computer Vision task, which takes an input RGB image as input and produces a *prediction* of the modality of interest, such as a depth map of the input image.
|
||||
Currently, the following tasks are implemented:
|
||||
Each pipeline is tailored for a specific computer vision task, processing an input RGB image and generating a
|
||||
corresponding prediction.
|
||||
Currently, the following computer vision tasks are implemented:
|
||||
|
||||
| Pipeline | Predicted Modalities | Demos |
|
||||
|---------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------:|
|
||||
| [MarigoldDepthPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py) | [Depth](https://en.wikipedia.org/wiki/Depth_map), [Disparity](https://en.wikipedia.org/wiki/Binocular_disparity) | [Fast Demo (LCM)](https://huggingface.co/spaces/prs-eth/marigold-lcm), [Slow Original Demo (DDIM)](https://huggingface.co/spaces/prs-eth/marigold) |
|
||||
| [MarigoldNormalsPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py) | [Surface normals](https://en.wikipedia.org/wiki/Normal_mapping) | [Fast Demo (LCM)](https://huggingface.co/spaces/prs-eth/marigold-normals-lcm) |
|
||||
| Pipeline | Recommended Model Checkpoints | Spaces (Interactive Apps) | Predicted Modalities |
|
||||
|---------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [MarigoldDepthPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py) | [prs-eth/marigold-depth-v1-1](https://huggingface.co/prs-eth/marigold-depth-v1-1) | [Depth Estimation](https://huggingface.co/spaces/prs-eth/marigold) | [Depth](https://en.wikipedia.org/wiki/Depth_map), [Disparity](https://en.wikipedia.org/wiki/Binocular_disparity) |
|
||||
| [MarigoldNormalsPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py) | [prs-eth/marigold-normals-v1-1](https://huggingface.co/prs-eth/marigold-normals-v1-1) | [Surface Normals Estimation](https://huggingface.co/spaces/prs-eth/marigold-normals) | [Surface normals](https://en.wikipedia.org/wiki/Normal_mapping) |
|
||||
| [MarigoldIntrinsicsPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_intrinsics.py) | [prs-eth/marigold-iid-appearance-v1-1](https://huggingface.co/prs-eth/marigold-iid-appearance-v1-1),<br>[prs-eth/marigold-iid-lighting-v1-1](https://huggingface.co/prs-eth/marigold-iid-lighting-v1-1) | [Intrinsic Image Decomposition](https://huggingface.co/spaces/prs-eth/marigold-iid) | [Albedo](https://en.wikipedia.org/wiki/Albedo), [Materials](https://www.n.aiq3d.com/wiki/roughnessmetalnessao-map), [Lighting](https://en.wikipedia.org/wiki/Diffuse_reflection) |

All original checkpoints are available under the [PRS-ETH](https://huggingface.co/prs-eth/) organization on Hugging Face.
They are designed for use with diffusers pipelines and the [original codebase](https://github.com/prs-eth/marigold), which can also be used to train
new model checkpoints.
The following is a summary of the recommended checkpoints, all of which produce reliable results with 1 to 4 steps.

| Checkpoint | Modality | Comment |
|---|---|---|
| [prs-eth/marigold-depth-v1-1](https://huggingface.co/prs-eth/marigold-depth-v1-1) | Depth | Affine-invariant depth prediction assigns each pixel a value between 0 (near plane) and 1 (far plane), with both planes determined by the model during inference. |
| [prs-eth/marigold-normals-v0-1](https://huggingface.co/prs-eth/marigold-normals-v0-1) | Normals | The surface normals predictions are unit-length 3D vectors in the screen-space camera frame, with values in the range from -1 to 1. |
| [prs-eth/marigold-iid-appearance-v1-1](https://huggingface.co/prs-eth/marigold-iid-appearance-v1-1) | Intrinsics | InteriorVerse decomposition comprises Albedo and two BRDF material properties: Roughness and Metallicity. |
| [prs-eth/marigold-iid-lighting-v1-1](https://huggingface.co/prs-eth/marigold-iid-lighting-v1-1) | Intrinsics | HyperSim decomposition of an image \\(I\\) comprises Albedo \\(A\\), Diffuse shading \\(S\\), and Non-diffuse residual \\(R\\): \\(I = A*S+R\\). |

The examples below are mostly given for depth prediction, but they can be universally applied to other supported
modalities.
We showcase the predictions using the same input image of Albert Einstein generated by Midjourney.
This makes it easier to compare visualizations of the predictions across various modalities and checkpoints.
|
||||
</div>
|
||||
</div>

## Depth Prediction

To get a depth prediction, load the `prs-eth/marigold-depth-v1-1` checkpoint into [`MarigoldDepthPipeline`],
put the image through the pipeline, and save the predictions:

```python
import diffusers
import torch

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-v1-1", variant="fp16", torch_dtype=torch.float16
).to("cuda")

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")

depth = pipe(image)

vis = pipe.image_processor.visualize_depth(depth.prediction)
vis[0].save("einstein_depth.png")

depth_16bit = pipe.image_processor.export_depth_to_16bit_png(depth.prediction)
depth_16bit[0].save("einstein_depth_16bit.png")
```

The [`~pipelines.marigold.marigold_image_processing.MarigoldImageProcessor.visualize_depth`] function applies one of
[matplotlib's colormaps](https://matplotlib.org/stable/users/explain/colors/colormaps.html) (`Spectral` by default) to map the predicted pixel values from a single-channel `[0, 1]`
depth range into an RGB image.
With the `Spectral` colormap, pixels with near depth are painted red, and far pixels are blue.
The 16-bit PNG file stores the single channel values mapped linearly from the `[0, 1]` range into `[0, 65535]`.
Below are the raw and the visualized predictions. The darker and closer areas (mustache) are easier to distinguish in
the visualization.

<div class="flex gap-4">
|
||||
<div style="flex: 1 1 50%; max-width: 50%;">
</div>
|
||||
</div>
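
Because the 16-bit PNG is just a linear mapping of the `[0, 1]` range, it can be loaded back into floating-point values outside of diffusers. Below is a minimal sketch, assuming Pillow and NumPy are available (these dependencies are an assumption of this example, not part of the pipeline API):

```python
import numpy as np
from PIL import Image

# Invert the linear [0, 1] -> [0, 65535] mapping used by export_depth_to_16bit_png.
depth_uint16 = np.asarray(Image.open("einstein_depth_16bit.png"), dtype=np.uint16)
depth_01 = depth_uint16.astype(np.float32) / 65535.0
print(depth_01.min(), depth_01.max())  # values back in the [0, 1] affine-invariant range
```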

## Surface Normals Estimation

Load the `prs-eth/marigold-normals-v1-1` checkpoint into [`MarigoldNormalsPipeline`], put the image through the
pipeline, and save the predictions:

```python
import diffusers
import torch

pipe = diffusers.MarigoldNormalsPipeline.from_pretrained(
    "prs-eth/marigold-normals-v1-1", variant="fp16", torch_dtype=torch.float16
).to("cuda")

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")

normals = pipe(image)

vis = pipe.image_processor.visualize_normals(normals.prediction)
vis[0].save("einstein_normals.png")
```

The [`~pipelines.marigold.marigold_image_processing.MarigoldImageProcessor.visualize_normals`] function maps the three-dimensional
prediction with pixel values in the range `[-1, 1]` into an RGB image.
The visualization function supports flipping surface normals axes to make the visualization compatible with other
choices of the frame of reference.
Conceptually, each pixel is painted according to the surface normal vector in the frame of reference, where the `X` axis
points right, the `Y` axis points up, and the `Z` axis points at the viewer.
Below is the visualized prediction:

<div class="flex gap-4" style="justify-content: center; width: 100%;">
</div>
|
||||
</div>
|
||||
In this example, the nose tip almost certainly has a point on the surface at which the surface normal vector points
straight at the viewer, meaning that its coordinates are `[0, 0, 1]`.
This vector maps to the RGB `[128, 128, 255]`, which corresponds to the violet-blue color.
Similarly, a surface normal on the cheek in the right part of the image has a large `X` component, which increases the
red hue.
Points on the shoulders pointing up with a large `Y` component promote the green color.
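
The conceptual mapping from a unit normal to a color can be written down directly. The function below is a small sketch using NumPy and the axis convention described above; it is an illustration, not the exact implementation inside `MarigoldImageProcessor`:

```python
import numpy as np

def normal_to_rgb(normals: np.ndarray) -> np.ndarray:
    """Map unit normals in [-1, 1] (X right, Y up, Z toward the viewer) to RGB in [0, 255]."""
    rgb = (normals + 1.0) / 2.0 * 255.0
    return np.round(rgb).astype(np.uint8)

# The nose-tip normal [0, 0, 1] maps to [128, 128, 255], the violet-blue color mentioned above.
print(normal_to_rgb(np.array([0.0, 0.0, 1.0])))
```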

## Intrinsic Image Decomposition

Marigold provides two models for Intrinsic Image Decomposition (IID): "Appearance" and "Lighting".
Each model produces Albedo maps, derived from InteriorVerse and Hypersim annotations, respectively.

- The "Appearance" model also estimates Material properties: Roughness and Metallicity.
- The "Lighting" model generates Diffuse Shading and Non-diffuse Residual.

Here is the sample code saving predictions made by the "Appearance" model:
|
||||
|
||||
```python
|
||||
import diffusers
|
||||
import torch
|
||||
|
||||
pipe = diffusers.MarigoldIntrinsicsPipeline.from_pretrained(
|
||||
"prs-eth/marigold-iid-appearance-v1-1", variant="fp16", torch_dtype=torch.float16
|
||||
).to("cuda")
|
||||
|
||||
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
|
||||
|
||||
intrinsics = pipe(image)
|
||||
|
||||
vis = pipe.image_processor.visualize_intrinsics(intrinsics.prediction, pipe.target_properties)
|
||||
vis[0]["albedo"].save("einstein_albedo.png")
|
||||
vis[0]["roughness"].save("einstein_roughness.png")
|
||||
vis[0]["metallicity"].save("einstein_metallicity.png")
|
||||
```
|
||||
|
||||
Another example demonstrating the predictions made by the "Lighting" model:
|
||||
|
||||
```python
|
||||
import diffusers
|
||||
import torch
|
||||
|
||||
pipe = diffusers.MarigoldIntrinsicsPipeline.from_pretrained(
|
||||
"prs-eth/marigold-iid-lighting-v1-1", variant="fp16", torch_dtype=torch.float16
|
||||
).to("cuda")
|
||||
|
||||
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
|
||||
|
||||
intrinsics = pipe(image)
|
||||
|
||||
vis = pipe.image_processor.visualize_intrinsics(intrinsics.prediction, pipe.target_properties)
|
||||
vis[0]["albedo"].save("einstein_albedo.png")
|
||||
vis[0]["shading"].save("einstein_shading.png")
|
||||
vis[0]["residual"].save("einstein_residual.png")
|
||||
```
|
||||
|
||||
Both models share the same pipeline while supporting different decomposition types.
|
||||
The exact decomposition parameterization (e.g., sRGB vs. linear space) is stored in the
|
||||
`pipe.target_properties` dictionary, which is passed into the
|
||||
[`~pipelines.marigold.marigold_image_processing.MarigoldImageProcessor.visualize_intrinsics`] function.
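
Continuing from either snippet above, the decomposition layout of a loaded checkpoint can be inspected at runtime; a small sketch:

```python
# Which targets the loaded IID checkpoint produces, and how they are parameterized,
# is described by the checkpoint-specific dictionary below (contents vary per model).
print(pipe.target_properties)
print(list(vis[0].keys()))  # e.g. ["albedo", "shading", "residual"] for the "Lighting" model
```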
|
||||
|
||||
Below are some examples showcasing the predicted decomposition outputs.
|
||||
All modalities can be inspected in the
|
||||
[Intrinsic Image Decomposition](https://huggingface.co/spaces/prs-eth/marigold-iid) Space.
|
||||
|
||||
<div class="flex gap-4">
|
||||
<div style="flex: 1 1 50%; max-width: 50%;">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/8c7986eaaab5eb9604eb88336311f46a7b0ff5ab/marigold/marigold_einstein_albedo.png"/>
|
||||
<figcaption class="mt-1 text-center text-sm text-gray-500">
|
||||
Predicted albedo ("Appearance" model)
|
||||
</figcaption>
|
||||
</div>
|
||||
<div style="flex: 1 1 50%; max-width: 50%;">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/8c7986eaaab5eb9604eb88336311f46a7b0ff5ab/marigold/marigold_einstein_diffuse.png"/>
|
||||
<figcaption class="mt-1 text-center text-sm text-gray-500">
|
||||
Predicted diffuse shading ("Lighting" model)
|
||||
</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
## Speeding up inference
|
||||
|
||||
The above quick start snippets are already optimized for quality and speed, loading the checkpoint, utilizing the
|
||||
`fp16` variant of weights and computation, and performing the default number (4) of denoising diffusion steps.
|
||||
The first step to accelerate inference, at the expense of prediction quality, is to reduce the denoising diffusion
|
||||
steps to the minimum:
|
||||
|
||||
```diff
|
||||
import diffusers
|
||||
import torch
|
||||
|
||||
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
|
||||
"prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
|
||||
"prs-eth/marigold-depth-v1-1", variant="fp16", torch_dtype=torch.float16
|
||||
).to("cuda")
|
||||
|
||||
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
|
||||
|
||||
- depth = pipe(image)
|
||||
+ depth = pipe(image, num_inference_steps=1)
|
||||
```
|
||||
|
||||
With this change, the `pipe` call completes in 280ms on an RTX 3090 GPU.
|
||||
Internally, the input image is first encoded using the Stable Diffusion VAE encoder, followed by a single denoising
|
||||
step performed by the U-Net.
|
||||
Finally, the prediction latent is decoded with the VAE decoder into pixel space.
|
||||
In this setup, two out of three module calls are dedicated to converting between the pixel and latent spaces of the LDM.
|
||||
Since Marigold's latent space is compatible with Stable Diffusion 2.0, inference can be accelerated by more than 3x,
|
||||
reducing the call time to 85ms on an RTX 3090, by using a [lightweight replacement of the SD VAE](../api/models/autoencoder_tiny).
|
||||
Note that using a lightweight VAE may slightly reduce the visual quality of the predictions.
|
||||
|
||||
```diff
|
||||
import diffusers
|
||||
import torch
|
||||
|
||||
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
|
||||
"prs-eth/marigold-depth-v1-1", variant="fp16", torch_dtype=torch.float16
|
||||
).to("cuda")
|
||||
|
||||
+ pipe.vae = diffusers.AutoencoderTiny.from_pretrained(
+     "madebyollin/taesd", torch_dtype=torch.float16
+ ).cuda()

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")

depth = pipe(image, num_inference_steps=1)
|
||||
```
|
||||
|
||||
So far, we have optimized the number of diffusion steps and model components. Self-attention operations account for a
|
||||
significant portion of computations.
|
||||
Speeding them up can be achieved by using a more efficient attention processor:
|
||||
|
||||
```diff
|
||||
import diffusers
|
||||
import torch
|
||||
+ from diffusers.models.attention_processor import AttnProcessor2_0
|
||||
|
||||
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
|
||||
"prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
|
||||
"prs-eth/marigold-depth-v1-1", variant="fp16", torch_dtype=torch.float16
|
||||
).to("cuda")
|
||||
|
||||
+ pipe.vae.set_attn_processor(AttnProcessor2_0())
|
||||
+ pipe.unet.set_attn_processor(AttnProcessor2_0())
|
||||
|
||||
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
|
||||
|
||||
depth = pipe(image, num_inference_steps=1)
|
||||
```
|
||||
|
||||
Finally, as suggested in [Optimizations](../optimization/torch2.0#torch.compile), enabling `torch.compile` can further enhance performance depending on
|
||||
the target hardware.
|
||||
However, compilation incurs a significant overhead during the first pipeline invocation, making it beneficial only when
|
||||
the same pipeline instance is called repeatedly, such as within a loop.
|
||||
|
||||
```diff
|
||||
import diffusers
|
||||
import torch
|
||||
from diffusers.models.attention_processor import AttnProcessor2_0
|
||||
|
||||
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
|
||||
"prs-eth/marigold-depth-v1-1", variant="fp16", torch_dtype=torch.float16
|
||||
).to("cuda")
|
||||
|
||||
pipe.vae.set_attn_processor(AttnProcessor2_0())
|
||||
pipe.unet.set_attn_processor(AttnProcessor2_0())
|
||||
|
||||
+ pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
|
||||
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
||||
|
||||
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
|
||||
depth = pipe(image, num_inference_steps=1)
|
||||
```
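
As a rough illustration of the trade-off above, the compiled pipeline can be warmed up before any timing or batch processing; a minimal sketch, continuing from the snippet above:

```python
# The first call pays the compilation cost; subsequent calls run at full speed.
for _ in range(3):
    _ = pipe(image, num_inference_steps=1)
```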
|
||||
|
||||
## Qualitative Comparison with Depth Anything
|
||||
|
||||
With the above speed optimizations, Marigold delivers more detailed predictions faster than [Depth Anything](https://huggingface.co/docs/transformers/main/en/model_doc/depth_anything) with the largest checkpoint [LiheYoung/depth-anything-large-hf](https://huggingface.co/LiheYoung/depth-anything-large-hf):
|
||||
|
||||
<div class="flex gap-4">
|
||||
<div style="flex: 1 1 50%; max-width: 50%;">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_depth.png"/>
|
||||
<figcaption class="mt-1 text-center text-sm text-gray-500">
|
||||
Marigold LCM fp16 with Tiny AutoEncoder
|
||||
</figcaption>
|
||||
</div>
|
||||
<div style="flex: 1 1 50%; max-width: 50%;">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/einstein_depthanything_large.png"/>
|
||||
<figcaption class="mt-1 text-center text-sm text-gray-500">
|
||||
Depth Anything Large
|
||||
</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
## Maximizing Precision and Ensembling
|
||||
|
||||
Marigold pipelines have a built-in ensembling mechanism combining multiple predictions from different random latents.
|
||||
This is a brute-force way of improving the precision of predictions, capitalizing on the generative nature of diffusion.
|
||||
The ensembling path is activated automatically when the `ensemble_size` argument is set greater than or equal to `3`.
|
||||
When aiming for maximum precision, it makes sense to adjust `num_inference_steps` simultaneously with `ensemble_size`.
|
||||
The recommended values vary across checkpoints but primarily depend on the scheduler type.
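
One convenient pattern is to key the settings on the scheduler class; the step and ensemble sizes below are example defaults (an assumption of this sketch, to be tuned per checkpoint):

```python
import diffusers

# Pick (num_inference_steps, ensemble_size) based on the scheduler the checkpoint ships with.
ensembling_kwargs = {
    diffusers.schedulers.DDIMScheduler: {"num_inference_steps": 10, "ensemble_size": 10},
    diffusers.schedulers.LCMScheduler: {"num_inference_steps": 4, "ensemble_size": 5},
}

pipe = diffusers.MarigoldNormalsPipeline.from_pretrained("prs-eth/marigold-normals-v1-1").to("cuda")
pipe_kwargs = ensembling_kwargs.get(type(pipe.scheduler), {"num_inference_steps": 10, "ensemble_size": 5})

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
normals = pipe(image, **pipe_kwargs)
```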
|
||||
The effect of ensembling is particularly well-seen with surface normals:
|
||||
|
||||
```diff
import diffusers

pipe = diffusers.MarigoldNormalsPipeline.from_pretrained("prs-eth/marigold-normals-v1-1").to("cuda")

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")

- depth = pipe(image)
+ depth = pipe(image, num_inference_steps=10, ensemble_size=5)

vis = pipe.image_processor.visualize_normals(depth.prediction)
vis[0].save("einstein_normals.png")
```
|
||||
|
||||
<div class="flex gap-4">
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
As can be seen, all areas with fine-grained structures, such as hair, got more conservative and on average more
correct predictions.
|
||||
Such a result is more suitable for precision-sensitive downstream tasks, such as 3D reconstruction.
|
||||
## Frame-by-frame Video Processing with Temporal Consistency
|
||||
|
||||
Due to Marigold's generative nature, each prediction is unique and defined by the random noise sampled for the latent
|
||||
initialization.
|
||||
This becomes an obvious drawback compared to traditional end-to-end dense regression networks, as exemplified in the
|
||||
following videos:
|
||||
|
||||
<div class="flex gap-4">
|
||||
<div style="flex: 1 1 50%; max-width: 50%;">
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
To address this issue, it is possible to pass the `latents` argument to the pipelines, which defines the starting point of
diffusion.
Empirically, we found that a convex combination of the very same starting point noise latent and the latent
corresponding to the previous frame prediction gives sufficiently smooth results, as implemented in the snippet below:
|
||||
|
||||
```python
|
||||
import imageio
|
||||
import diffusers
|
||||
import torch
|
||||
from diffusers.models.attention_processor import AttnProcessor2_0
|
||||
from PIL import Image
|
||||
from tqdm import tqdm
|
||||
|
||||
device = "cuda"
|
||||
path_in = "obama.mp4"
|
||||
path_in = "https://huggingface.co/spaces/prs-eth/marigold-lcm/resolve/c7adb5427947d2680944f898cd91d386bf0d4924/files/video/obama.mp4"
|
||||
path_out = "obama_depth.gif"
|
||||
|
||||
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
|
||||
"prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
|
||||
"prs-eth/marigold-depth-v1-1", variant="fp16", torch_dtype=torch.float16
|
||||
).to(device)
|
||||
pipe.vae = diffusers.AutoencoderTiny.from_pretrained(
|
||||
"madebyollin/taesd", torch_dtype=torch.float16
|
||||
).to(device)
|
||||
pipe.unet.set_attn_processor(AttnProcessor2_0())
|
||||
pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
|
||||
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
||||
pipe.set_progress_bar_config(disable=True)
|
||||
|
||||
with imageio.get_reader(path_in) as reader:
|
||||
|
||||
latents = 0.9 * latents + 0.1 * last_frame_latent
|
||||
|
||||
depth = pipe(
|
||||
frame,
|
||||
num_inference_steps=1,
|
||||
match_input_resolution=False,
|
||||
latents=latents,
|
||||
output_latent=True,
|
||||
)
|
||||
last_frame_latent = depth.latent
|
||||
out.append(pipe.image_processor.visualize_depth(depth.prediction)[0])
|
||||
|
||||
```
|
||||
|
||||
Here, the diffusion process starts from the given computed latent.
|
||||
The pipeline sets `output_latent=True` to access `out.latent` and computes its contribution to the next frame's latent
|
||||
initialization.
|
||||
The result is much more stable now:
|
||||
|
||||
<div class="flex gap-4">
|
||||
image = diffusers.utils.load_image(
|
||||
)
|
||||
|
||||
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
|
||||
"prs-eth/marigold-depth-lcm-v1-0", torch_dtype=torch.float16, variant="fp16"
|
||||
"prs-eth/marigold-depth-v1-1", torch_dtype=torch.float16, variant="fp16"
|
||||
).to(device)
|
||||
|
||||
depth_image = pipe(image, generator=generator).prediction
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
## Quantitative Evaluation
|
||||
|
||||
To evaluate Marigold quantitatively in standard leaderboards and benchmarks (such as NYU, KITTI, and other datasets),
|
||||
follow the evaluation protocol outlined in the paper: load the full precision fp32 model and use appropriate values
|
||||
for `num_inference_steps` and `ensemble_size`.
|
||||
Optionally seed randomness to ensure reproducibility.
|
||||
Maximizing `batch_size` will deliver maximum device utilization.
|
||||
|
||||
```python
|
||||
import diffusers
|
||||
import torch
|
||||
|
||||
device = "cuda"
|
||||
seed = 2024
|
||||
|
||||
generator = torch.Generator(device=device).manual_seed(seed)
|
||||
pipe = diffusers.MarigoldDepthPipeline.from_pretrained("prs-eth/marigold-depth-v1-1").to(device)
|
||||
|
||||
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
|
||||
|
||||
depth = pipe(
|
||||
image,
|
||||
num_inference_steps=4, # set according to the evaluation protocol from the paper
|
||||
ensemble_size=10, # set according to the evaluation protocol from the paper
|
||||
generator=generator,
|
||||
)
|
||||
|
||||
# evaluate metrics
|
||||
```
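
The `# evaluate metrics` placeholder depends on the benchmark harness. As an illustration only, one common affine-invariant depth metric is AbsRel computed after a least-squares scale-and-shift alignment of the prediction to the ground truth. A minimal sketch, assuming NumPy arrays and a validity mask (both hypothetical inputs of this example):

```python
import numpy as np

def abs_rel(pred: np.ndarray, gt: np.ndarray, valid: np.ndarray) -> float:
    """AbsRel after least-squares scale/shift alignment (affine-invariant protocol sketch)."""
    p, g = pred[valid].astype(np.float64), gt[valid].astype(np.float64)
    # Solve min_{s,t} ||s * p + t - g||^2 for scale s and shift t.
    A = np.stack([p, np.ones_like(p)], axis=1)
    (s, t), *_ = np.linalg.lstsq(A, g, rcond=None)
    aligned = s * p + t
    return float(np.mean(np.abs(aligned - g) / g))
```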
|
||||
|
||||
## Using Predictive Uncertainty
|
||||
|
||||
The ensembling mechanism built into Marigold pipelines combines multiple predictions obtained from different random
|
||||
latents.
|
||||
As a side effect, it can be used to quantify epistemic (model) uncertainty; simply specify `ensemble_size` greater
than or equal to 3 and set `output_uncertainty=True`.
|
||||
The resulting uncertainty will be available in the `uncertainty` field of the output.
|
||||
It can be visualized as follows:
|
||||
|
||||
```python
|
||||
import diffusers
|
||||
import torch
|
||||
|
||||
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
|
||||
"prs-eth/marigold-depth-v1-1", variant="fp16", torch_dtype=torch.float16
|
||||
).to("cuda")
|
||||
|
||||
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
|
||||
|
||||
depth = pipe(
|
||||
image,
|
||||
ensemble_size=10, # any number >= 3
|
||||
output_uncertainty=True,
|
||||
)
|
||||
|
||||
uncertainty = pipe.image_processor.visualize_uncertainty(depth.uncertainty)
|
||||
uncertainty[0].save("einstein_depth_uncertainty.png")
|
||||
```
|
||||
|
||||
<div class="flex gap-4">
|
||||
<div style="flex: 1 1 33%; max-width: 33%;">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_depth_uncertainty.png"/>
|
||||
<figcaption class="mt-1 text-center text-sm text-gray-500">
|
||||
Depth uncertainty
|
||||
</figcaption>
|
||||
</div>
|
||||
<div style="flex: 1 1 33%; max-width: 33%;">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_normals_uncertainty.png"/>
|
||||
<figcaption class="mt-1 text-center text-sm text-gray-500">
|
||||
Surface normals uncertainty
|
||||
</figcaption>
|
||||
</div>
|
||||
<div style="flex: 1 1 33%; max-width: 33%;">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/4f83035d84a24e5ec44fdda129b1d51eba12ce04/marigold/marigold_einstein_albedo_uncertainty.png"/>
|
||||
<figcaption class="mt-1 text-center text-sm text-gray-500">
|
||||
Albedo uncertainty
|
||||
</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
The interpretation of uncertainty is easy: higher values (white) correspond to pixels where the model struggles to
make consistent predictions.
|
||||
- The depth model exhibits the most uncertainty around discontinuities, where object depth changes abruptly.
|
||||
- The surface normals model is least confident in fine-grained structures like hair and in dark regions such as the
|
||||
collar area.
|
||||
- Albedo uncertainty is represented as an RGB image, as it captures uncertainty independently for each color channel,
|
||||
unlike depth and surface normals. It is also higher in shaded regions and at discontinuities.
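
One practical use of the uncertainty map is to mask out unreliable pixels before a precision-sensitive downstream task. Below is a minimal sketch, continuing from the depth example above and assuming the pipeline's default NumPy outputs; the threshold is purely illustrative:

```python
import numpy as np

prediction = np.asarray(depth.prediction[0]).squeeze()
uncertainty = np.asarray(depth.uncertainty[0]).squeeze()

mask = uncertainty < 0.05                      # 0.05 is an arbitrary, task-dependent cutoff
masked_depth = np.where(mask, prediction, np.nan) # drop pixels the model is inconsistent about
print(f"kept {mask.mean():.1%} of pixels")
```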
|
||||
|
||||
## Conclusion
|
||||
|
||||
We hope Marigold proves valuable for your downstream tasks, whether as part of a broader generative workflow or for
|
||||
perception-based applications like 3D reconstruction.
|
||||
docs/source/en/using-diffusers/omnigen.md (new file, 317 lines)
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
# OmniGen
|
||||
|
||||
OmniGen is an image generation model. Unlike existing text-to-image models, OmniGen is a single model designed to handle a variety of tasks (e.g., text-to-image, image editing, controllable generation). It has the following features:
|
||||
- Minimalist model architecture, consisting of only a VAE and a transformer module, for joint modeling of text and images.
|
||||
- Support for multimodal inputs. It can process any text-image mixed data as instructions for image generation, rather than relying solely on text.
|
||||
|
||||
For more information, please refer to the [paper](https://arxiv.org/pdf/2409.11340).
|
||||
This guide will walk you through using OmniGen for various tasks and use cases.
|
||||
|
||||
## Load model checkpoints
|
||||
|
||||
Model weights may be stored in separate subfolders on the Hub or locally, in which case you should use the [`~DiffusionPipeline.from_pretrained`] method.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1-diffusers", torch_dtype=torch.bfloat16)
|
||||
```
|
||||
|
||||
## Text-to-image
|
||||
|
||||
For text-to-image, pass a text prompt. By default, OmniGen generates a 1024x1024 image.
|
||||
You can try setting the `height` and `width` parameters to generate images with different sizes.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained(
|
||||
"Shitao/OmniGen-v1-diffusers",
|
||||
torch_dtype=torch.bfloat16
|
||||
)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt = "Realistic photo. A young woman sits on a sofa, holding a book and facing the camera. She wears delicate silver hoop earrings adorned with tiny, sparkling diamonds that catch the light, with her long chestnut hair cascading over her shoulders. Her eyes are focused and gentle, framed by long, dark lashes. She is dressed in a cozy cream sweater, which complements her warm, inviting smile. Behind her, there is a table with a cup of water in a sleek, minimalist blue mug. The background is a serene indoor setting with soft natural light filtering through a window, adorned with tasteful art and flowers, creating a cozy and peaceful ambiance. 4K, HD."
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
height=1024,
|
||||
width=1024,
|
||||
guidance_scale=3,
|
||||
generator=torch.Generator(device="cpu").manual_seed(111),
|
||||
).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/t2i_woman_with_book.png" alt="generated image"/>
|
||||
</div>
|
||||
|
||||
## Image edit
|
||||
|
||||
OmniGen supports multimodal inputs.
|
||||
When the input includes an image, you need to add a placeholder `<img><|image_1|></img>` in the text prompt to represent the image.
|
||||
It is recommended to enable `use_input_image_size_as_output` to keep the edited image the same size as the original image.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
from diffusers.utils import load_image
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained(
|
||||
"Shitao/OmniGen-v1-diffusers",
|
||||
torch_dtype=torch.bfloat16
|
||||
)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt="<img><|image_1|></img> Remove the woman's earrings. Replace the mug with a clear glass filled with sparkling iced cola."
|
||||
input_images=[load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/t2i_woman_with_book.png")]
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
guidance_scale=2,
|
||||
img_guidance_scale=1.6,
|
||||
use_input_image_size_as_output=True,
|
||||
generator=torch.Generator(device="cpu").manual_seed(222)
|
||||
).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
<div class="flex flex-row gap-4">
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/t2i_woman_with_book.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/edit.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">edited image</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
OmniGen has some interesting features, such as visual reasoning, as shown in the example below.
|
||||
|
||||
```python
|
||||
prompt="If the woman is thirsty, what should she take? Find it in the image and highlight it in blue. <img><|image_1|></img>"
|
||||
input_images=[load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/edit.png")]
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
guidance_scale=2,
|
||||
img_guidance_scale=1.6,
|
||||
use_input_image_size_as_output=True,
|
||||
generator=torch.Generator(device="cpu").manual_seed(0)
|
||||
).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/reasoning.png" alt="generated image"/>
|
||||
</div>
|
||||
|
||||
## Controllable generation
|
||||
|
||||
OmniGen can handle several classic computer vision tasks. As shown below, OmniGen can detect human skeletons in input images, which can be used as control conditions to generate new images.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
from diffusers.utils import load_image
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained(
|
||||
"Shitao/OmniGen-v1-diffusers",
|
||||
torch_dtype=torch.bfloat16
|
||||
)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt="Detect the skeleton of human in this image: <img><|image_1|></img>"
|
||||
input_images=[load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/edit.png")]
|
||||
image1 = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
guidance_scale=2,
|
||||
img_guidance_scale=1.6,
|
||||
use_input_image_size_as_output=True,
|
||||
generator=torch.Generator(device="cpu").manual_seed(333)
|
||||
).images[0]
|
||||
image1.save("image1.png")
|
||||
|
||||
prompt="Generate a new photo using the following picture and text as conditions: <img><|image_1|></img>\n A young boy is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him."
|
||||
input_images=[load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/skeletal.png")]
|
||||
image2 = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
guidance_scale=2,
|
||||
img_guidance_scale=1.6,
|
||||
use_input_image_size_as_output=True,
|
||||
generator=torch.Generator(device="cpu").manual_seed(333)
|
||||
).images[0]
|
||||
image2.save("image2.png")
|
||||
```
|
||||
|
||||
<div class="flex flex-row gap-4">
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/edit.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/skeletal.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">detected skeleton</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/skeletal2img.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">skeleton to image</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
OmniGen can also directly use relevant information from input images to generate new images.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
from diffusers.utils import load_image
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained(
|
||||
"Shitao/OmniGen-v1-diffusers",
|
||||
torch_dtype=torch.bfloat16
|
||||
)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt="Following the pose of this image <img><|image_1|></img>, generate a new photo: A young boy is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him."
|
||||
input_images=[load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/edit.png")]
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
guidance_scale=2,
|
||||
img_guidance_scale=1.6,
|
||||
use_input_image_size_as_output=True,
|
||||
generator=torch.Generator(device="cpu").manual_seed(0)
|
||||
).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
<div class="flex flex-row gap-4">
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/same_pose.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
## ID and object preserving
|
||||
|
||||
OmniGen can generate multiple images based on the people and objects in the input image and supports inputting multiple images simultaneously.
|
||||
Additionally, OmniGen can extract desired objects from an image containing multiple objects based on instructions.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
from diffusers.utils import load_image
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained(
|
||||
"Shitao/OmniGen-v1-diffusers",
|
||||
torch_dtype=torch.bfloat16
|
||||
)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt="A man and a woman are sitting at a classroom desk. The man is the man with yellow hair in <img><|image_1|></img>. The woman is the woman on the left of <img><|image_2|></img>"
|
||||
input_image_1 = load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/3.png")
|
||||
input_image_2 = load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/4.png")
|
||||
input_images=[input_image_1, input_image_2]
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
height=1024,
|
||||
width=1024,
|
||||
guidance_scale=2.5,
|
||||
img_guidance_scale=1.6,
|
||||
generator=torch.Generator(device="cpu").manual_seed(666)
|
||||
).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
<div class="flex flex-row gap-4">
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/3.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">input_image_1</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/4.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">input_image_2</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/id2.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
from diffusers.utils import load_image
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained(
|
||||
"Shitao/OmniGen-v1-diffusers",
|
||||
torch_dtype=torch.bfloat16
|
||||
)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt="A woman is walking down the street, wearing a white long-sleeve blouse with lace details on the sleeves, paired with a blue pleated skirt. The woman is <img><|image_1|></img>. The long-sleeve blouse and a pleated skirt are <img><|image_2|></img>."
|
||||
input_image_1 = load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/emma.jpeg")
|
||||
input_image_2 = load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/dress.jpg")
|
||||
input_images=[input_image_1, input_image_2]
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
height=1024,
|
||||
width=1024,
|
||||
guidance_scale=2.5,
|
||||
img_guidance_scale=1.6,
|
||||
generator=torch.Generator(device="cpu").manual_seed(666)
|
||||
).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
<div class="flex flex-row gap-4">
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/emma.jpeg"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">person image</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/dress.jpg"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">clothe image</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/tryon.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
## Optimization when using multiple images
|
||||
|
||||
For the text-to-image task, OmniGen requires minimal memory and time costs (9GB memory and 31s for a 1024x1024 image on an A800 GPU).
|
||||
However, when using input images, the computational cost increases.
|
||||
|
||||
Here are some guidelines to help you reduce computational costs when using multiple images. The experiments are conducted on an A800 GPU with two input images.
|
||||
|
||||
Like other pipelines, you can reduce memory usage by offloading the model with `pipe.enable_model_cpu_offload()` or `pipe.enable_sequential_cpu_offload()`.
|
||||
In OmniGen, you can also decrease computational overhead by reducing the `max_input_image_size`.
|
||||
The memory consumption for different image sizes is shown in the table below:
|
||||
|
||||
| Setting | Memory Usage |
|
||||
|---------------------------|--------------|
|
||||
| max_input_image_size=1024 | 40GB |
|
||||
| max_input_image_size=512 | 17GB |
|
||||
| max_input_image_size=256 | 14GB |
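
The two levers mentioned above can be combined. The sketch below assumes that `max_input_image_size` is accepted as a call-time argument of the pipeline, which is an assumption of this example:

```python
import torch
from diffusers import OmniGenPipeline
from diffusers.utils import load_image

pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1-diffusers", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()  # trade speed for lower peak GPU memory

prompt = "<img><|image_1|></img> Remove the woman's earrings."
input_images = [load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/t2i_woman_with_book.png")]
image = pipe(
    prompt=prompt,
    input_images=input_images,
    max_input_image_size=512,  # downscale inputs to reduce memory (see the table above)
    use_input_image_size_as_output=True,
    guidance_scale=2,
    img_guidance_scale=1.6,
).images[0]
image.save("output.png")
```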
|
||||
|
||||
@@ -215,7 +215,7 @@ image
|
||||
|
||||
Prompt weighting provides a way to emphasize or de-emphasize certain parts of a prompt, allowing for more control over the generated image. A prompt can include several concepts, which get turned into contextualized text embeddings. The embeddings are used by the model to condition its cross-attention layers to generate an image (read the Stable Diffusion [blog post](https://huggingface.co/blog/stable_diffusion) to learn more about how it works).
|
||||
|
||||
Prompt weighting works by increasing or decreasing the scale of the text embedding vector that corresponds to its concept in the prompt because you may not necessarily want the model to focus on all concepts equally. The easiest way to prepare the prompt embeddings is to use [Stable Diffusion Long Prompt Weighted Embedding](https://github.com/xhinker/sd_embed) (sd_embed). Once you have the prompt-weighted embeddings, you can pass them to any pipeline that has a [prompt_embeds](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline.__call__.prompt_embeds) (and optionally [negative_prompt_embeds](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline.__call__.negative_prompt_embeds)) parameter, such as [`StableDiffusionPipeline`], [`StableDiffusionControlNetPipeline`], and [`StableDiffusionXLPipeline`].
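
As a purely conceptual sketch of what "scaling the text embedding vector" means, the function below multiplies the embeddings of one token span by a weight; this is not the sd_embed API, and the token span and weight are hypothetical:

```python
import torch

def weight_token_span(prompt_embeds: torch.Tensor, start: int, end: int, weight: float) -> torch.Tensor:
    # Emphasize (>1) or de-emphasize (<1) the tokens belonging to one concept.
    # Libraries such as sd_embed handle tokenization, weight parsing, and normalization for you.
    weighted = prompt_embeds.clone()
    weighted[:, start:end, :] *= weight
    return weighted
```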
|
||||
|
||||
<Tip>
|
||||
|
||||
@@ -223,136 +223,99 @@ If your favorite pipeline doesn't have a `prompt_embeds` parameter, please open
|
||||
|
||||
</Tip>
|
||||
|
||||
This guide will show you how to weight your prompts with sd_embed.
|
||||
|
||||
Before you begin, make sure you have the latest version of sd_embed installed:
|
||||
|
||||
```bash
|
||||
pip install git+https://github.com/xhinker/sd_embed.git@main
|
||||
```
|
||||
|
||||
For this example, let's use [`StableDiffusionXLPipeline`].
|
||||
|
||||
```py
|
||||
from diffusers import StableDiffusionXLPipeline, UniPCMultistepScheduler
|
||||
import torch
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_safetensors=True)
|
||||
pipe = StableDiffusionXLPipeline.from_pretrained("Lykon/dreamshaper-xl-1-0", torch_dtype=torch.float16)
|
||||
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
|
||||
pipe.to("cuda")
|
||||
```
|
||||
|
||||
prompt = "a red cat playing with a ball"
|
||||
To upweight or downweight a concept, surround the text with parentheses. More parentheses apply a heavier weight to the text. You can also append a numerical multiplier to the text to indicate how much you want to increase or decrease its weight by.
|
||||
|
||||
generator = torch.Generator(device="cpu").manual_seed(33)
|
||||
| format | multiplier |
|
||||
|---|---|
|
||||
| `(hippo)` | increase by 1.1x |
|
||||
| `((hippo))` | increase by 1.21x |
|
||||
| `(hippo:1.5)` | increase by 1.5x |
|
||||
| `(hippo:0.5)` | decrease by 4x |
|
||||
|
||||
Create a prompt and use a combination of parentheses and numerical multipliers to upweight various text.
|
||||
|
||||
```py
|
||||
from sd_embed.embedding_funcs import get_weighted_text_embeddings_sdxl
|
||||
|
||||
prompt = """A whimsical and creative image depicting a hybrid creature that is a mix of a waffle and a hippopotamus.
|
||||
This imaginative creature features the distinctive, bulky body of a hippo,
|
||||
but with a texture and appearance resembling a golden-brown, crispy waffle.
|
||||
The creature might have elements like waffle squares across its skin and a syrup-like sheen.
|
||||
It's set in a surreal environment that playfully combines a natural water habitat of a hippo with elements of a breakfast table setting,
|
||||
possibly including oversized utensils or plates in the background.
|
||||
The image should evoke a sense of playful absurdity and culinary fantasy.
|
||||
"""
|
||||
|
||||
neg_prompt = """\
|
||||
skin spots,acnes,skin blemishes,age spot,(ugly:1.2),(duplicate:1.2),(morbid:1.21),(mutilated:1.2),\
|
||||
(tranny:1.2),mutated hands,(poorly drawn hands:1.5),blurry,(bad anatomy:1.2),(bad proportions:1.3),\
|
||||
extra limbs,(disfigured:1.2),(missing arms:1.2),(extra legs:1.2),(fused fingers:1.5),\
|
||||
(too many fingers:1.5),(unclear eyes:1.2),lowers,bad hands,missing fingers,extra digit,\
|
||||
bad hands,missing fingers,(extra arms and legs),(worst quality:2),(low quality:2),\
|
||||
(normal quality:2),lowres,((monochrome)),((grayscale))
|
||||
"""
|
||||
```
|
||||
|
||||
Use the `get_weighted_text_embeddings_sdxl` function to generate the prompt embeddings and the negative prompt embeddings. It'll also generate the pooled and negative pooled prompt embeddings since you're using the SDXL model.
|
||||
|
||||
> [!TIP]
|
||||
> You can safely ignore the error message below about the token index length exceeding the model's maximum sequence length. All your tokens will be used in the embedding process.
|
||||
>
|
||||
> ```
|
||||
> Token indices sequence length is longer than the specified maximum sequence length for this model
|
||||
> ```
|
||||
|
||||
```py
|
||||
(
|
||||
prompt_embeds,
|
||||
prompt_neg_embeds,
|
||||
pooled_prompt_embeds,
|
||||
negative_pooled_prompt_embeds
|
||||
) = get_weighted_text_embeddings_sdxl(
|
||||
pipe,
|
||||
prompt=prompt,
|
||||
neg_prompt=neg_prompt
|
||||
)
|
||||
|
||||
image = pipe(
|
||||
prompt_embeds=prompt_embeds,
|
||||
negative_prompt_embeds=prompt_neg_embeds,
|
||||
pooled_prompt_embeds=pooled_prompt_embeds,
|
||||
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
||||
num_inference_steps=30,
|
||||
height=1024,
|
||||
width=1024 + 512,
|
||||
guidance_scale=4.0,
|
||||
generator=torch.Generator("cuda").manual_seed(2)
|
||||
).images[0]
|
||||
image
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_0.png"/>
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sd_embed_sdxl.png"/>
|
||||
</div>
|
||||
|
||||
### Weighting
|
||||
|
||||
You'll notice there is no "ball" in the image! Let's use compel to upweight the concept of "ball" in the prompt. Create a [`Compel`](https://github.com/damian0815/compel/blob/main/doc/compel.md#compel-objects) object, and pass it a tokenizer and text encoder:
|
||||
|
||||
```py
|
||||
from compel import Compel
|
||||
|
||||
compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
|
||||
```
|
||||
|
||||
compel uses `+` or `-` to increase or decrease the weight of a word in the prompt. To increase the weight of "ball":
|
||||
|
||||
<Tip>
|
||||
|
||||
`+` corresponds to the value `1.1`, `++` corresponds to `1.1^2`, and so on. Similarly, `-` corresponds to `0.9` and `--` corresponds to `0.9^2`. Feel free to experiment with adding more `+` or `-` in your prompt!
|
||||
|
||||
</Tip>
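
As a quick arithmetic check, the sketch below shows how those `+`/`-` multipliers compound; it is plain Python and independent of compel itself:

```py
# Each "+" multiplies a token's weight by 1.1 and each "-" by 0.9.
for n in range(1, 4):
    print(f"{'+' * n}: {1.1 ** n:.3f}    {'-' * n}: {0.9 ** n:.3f}")
# "++" therefore weighs a token by roughly 1.21x and "--" by roughly 0.81x.
```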
|
||||
|
||||
```py
|
||||
prompt = "a red cat playing with a ball++"
|
||||
```
|
||||
|
||||
Pass the prompt to `compel_proc` to create the new prompt embeddings which are passed to the pipeline:
|
||||
|
||||
```py
|
||||
prompt_embeds = compel_proc(prompt)
|
||||
generator = torch.manual_seed(33)
|
||||
|
||||
image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0]
|
||||
image
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_1.png"/>
|
||||
</div>
|
||||
|
||||
To downweight parts of the prompt, use the `-` suffix:
|
||||
|
||||
```py
|
||||
prompt = "a red------- cat playing with a ball"
|
||||
prompt_embeds = compel_proc(prompt)
|
||||
|
||||
generator = torch.manual_seed(33)
|
||||
|
||||
image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0]
|
||||
image
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png"/>
|
||||
</div>
|
||||
|
||||
You can even up or downweight multiple concepts in the same prompt:
|
||||
|
||||
```py
|
||||
prompt = "a red cat++ playing with a ball----"
|
||||
prompt_embeds = compel_proc(prompt)
|
||||
|
||||
generator = torch.manual_seed(33)
|
||||
|
||||
image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0]
|
||||
image
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-pos-neg.png"/>
|
||||
</div>
|
||||
|
||||
### Blending
|
||||
|
||||
You can also create a weighted *blend* of prompts by adding `.blend()` to a list of prompts and passing it some weights. Your blend may not always produce the result you expect because it breaks some assumptions about how the text encoder functions, so just have fun and experiment with it!
|
||||
|
||||
```py
|
||||
prompt_embeds = compel_proc('("a red cat playing with a ball", "jungle").blend(0.7, 0.8)')
|
||||
generator = torch.Generator(device="cuda").manual_seed(33)
|
||||
|
||||
image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0]
|
||||
image
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-blend.png"/>
|
||||
</div>
|
||||
|
||||
### Conjunction
|
||||
|
||||
A conjunction diffuses each prompt independently and concatenates their results by their weighted sum. Add `.and()` to the end of a list of prompts to create a conjunction:
|
||||
|
||||
```py
|
||||
prompt_embeds = compel_proc('["a red cat", "playing with a", "ball"].and()')
|
||||
generator = torch.Generator(device="cuda").manual_seed(55)
|
||||
|
||||
image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0]
|
||||
image
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-conj.png"/>
|
||||
</div>
|
||||
> [!TIP]
|
||||
> Refer to the [sd_embed](https://github.com/xhinker/sd_embed) repository for additional details about long prompt weighting for FLUX.1, Stable Cascade, and Stable Diffusion 1.5.
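
For reference, here is a minimal sketch of the analogous Stable Diffusion 1.5 call with `get_weighted_text_embeddings_sd15`; the checkpoint and prompts are placeholders, and the full workflow is shown in the textual inversion section below:

```py
import torch
from diffusers import StableDiffusionPipeline
from sd_embed.embedding_funcs import get_weighted_text_embeddings_sd15

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Weighted prompt and negative prompt using the (token:weight) syntax.
prompt_embeds, prompt_neg_embeds = get_weighted_text_embeddings_sd15(
    pipe, prompt="a (red:1.5) cat playing with a ball", neg_prompt="(blurry:1.3), lowres"
)
image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=prompt_neg_embeds).images[0]
```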
|
||||
|
||||
### Textual inversion
|
||||
|
||||
@@ -363,35 +326,63 @@ Create a pipeline and use the [`~loaders.TextualInversionLoaderMixin.load_textua
|
||||
```py
|
||||
import torch
|
||||
from diffusers import StableDiffusionPipeline
|
||||
from compel import Compel, DiffusersTextualInversionManager
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16,
|
||||
use_safetensors=True, variant="fp16").to("cuda")
|
||||
"stable-diffusion-v1-5/stable-diffusion-v1-5",
|
||||
torch_dtype=torch.float16,
|
||||
).to("cuda")
|
||||
pipe.load_textual_inversion("sd-concepts-library/midjourney-style")
|
||||
```
|
||||
|
||||
Compel provides a `DiffusersTextualInversionManager` class to simplify prompt weighting with textual inversion. Instantiate `DiffusersTextualInversionManager` and pass it to the `Compel` class:
|
||||
Add the `<midjourney-style>` text to the prompt to trigger the textual inversion.
|
||||
|
||||
```py
|
||||
textual_inversion_manager = DiffusersTextualInversionManager(pipe)
|
||||
compel_proc = Compel(
|
||||
tokenizer=pipe.tokenizer,
|
||||
text_encoder=pipe.text_encoder,
|
||||
textual_inversion_manager=textual_inversion_manager)
|
||||
from sd_embed.embedding_funcs import get_weighted_text_embeddings_sd15
|
||||
|
||||
prompt = """<midjourney-style> A whimsical and creative image depicting a hybrid creature that is a mix of a waffle and a hippopotamus.
|
||||
This imaginative creature features the distinctive, bulky body of a hippo,
|
||||
but with a texture and appearance resembling a golden-brown, crispy waffle.
|
||||
The creature might have elements like waffle squares across its skin and a syrup-like sheen.
|
||||
It's set in a surreal environment that playfully combines a natural water habitat of a hippo with elements of a breakfast table setting,
|
||||
possibly including oversized utensils or plates in the background.
|
||||
The image should evoke a sense of playful absurdity and culinary fantasy.
|
||||
"""
|
||||
|
||||
neg_prompt = """\
|
||||
skin spots,acnes,skin blemishes,age spot,(ugly:1.2),(duplicate:1.2),(morbid:1.21),(mutilated:1.2),\
|
||||
(tranny:1.2),mutated hands,(poorly drawn hands:1.5),blurry,(bad anatomy:1.2),(bad proportions:1.3),\
|
||||
extra limbs,(disfigured:1.2),(missing arms:1.2),(extra legs:1.2),(fused fingers:1.5),\
|
||||
(too many fingers:1.5),(unclear eyes:1.2),lowers,bad hands,missing fingers,extra digit,\
|
||||
bad hands,missing fingers,(extra arms and legs),(worst quality:2),(low quality:2),\
|
||||
(normal quality:2),lowres,((monochrome)),((grayscale))
|
||||
"""
|
||||
```
|
||||
|
||||
Incorporate the concept to condition a prompt using the `<concept>` syntax:
Use the `get_weighted_text_embeddings_sd15` function to generate the prompt embeddings and the negative prompt embeddings.
|
||||
|
||||
```py
|
||||
prompt_embeds = compel_proc('("A red cat++ playing with a ball <midjourney-style>")')
|
||||
(
|
||||
prompt_embeds,
|
||||
prompt_neg_embeds,
|
||||
) = get_weighted_text_embeddings_sd15(
|
||||
pipe,
|
||||
prompt=prompt,
|
||||
neg_prompt=neg_prompt
|
||||
)
|
||||
|
||||
image = pipe(prompt_embeds=prompt_embeds).images[0]
|
||||
image = pipe(
|
||||
prompt_embeds=prompt_embeds,
|
||||
negative_prompt_embeds=prompt_neg_embeds,
|
||||
height=768,
|
||||
width=896,
|
||||
guidance_scale=4.0,
|
||||
generator=torch.Generator("cuda").manual_seed(2)
|
||||
).images[0]
|
||||
image
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-text-inversion.png"/>
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sd_embed_textual_inversion.png"/>
|
||||
</div>
|
||||
|
||||
### DreamBooth
|
||||
@@ -401,70 +392,44 @@ image
|
||||
```py
|
||||
import torch
|
||||
from diffusers import DiffusionPipeline, UniPCMultistepScheduler
|
||||
from compel import Compel
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained("sd-dreambooth-library/dndcoverart-v1", torch_dtype=torch.float16).to("cuda")
|
||||
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
|
||||
```
|
||||
|
||||
Create a `Compel` class with a tokenizer and text encoder, and pass your prompt to it. Depending on the model you use, you'll need to incorporate the model's unique identifier into your prompt. For example, the `dndcoverart-v1` model uses the identifier `dndcoverart`:
|
||||
Depending on the model you use, you'll need to incorporate the model's unique identifier into your prompt. For example, the `dndcoverart-v1` model uses the identifier `dndcoverart`:
|
||||
|
||||
```py
|
||||
compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
|
||||
prompt_embeds = compel_proc('("magazine cover of a dndcoverart dragon, high quality, intricate details, larry elmore art style").and()')
|
||||
image = pipe(prompt_embeds=prompt_embeds).images[0]
|
||||
image
|
||||
```
|
||||
from sd_embed.embedding_funcs import get_weighted_text_embeddings_sd15
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-dreambooth.png"/>
|
||||
</div>
|
||||
prompt = """dndcoverart of A whimsical and creative image depicting a hybrid creature that is a mix of a waffle and a hippopotamus.
|
||||
This imaginative creature features the distinctive, bulky body of a hippo,
|
||||
but with a texture and appearance resembling a golden-brown, crispy waffle.
|
||||
The creature might have elements like waffle squares across its skin and a syrup-like sheen.
|
||||
It's set in a surreal environment that playfully combines a natural water habitat of a hippo with elements of a breakfast table setting,
|
||||
possibly including oversized utensils or plates in the background.
|
||||
The image should evoke a sense of playful absurdity and culinary fantasy.
|
||||
"""
|
||||
|
||||
### Stable Diffusion XL
|
||||
neg_prompt = """\
|
||||
skin spots,acnes,skin blemishes,age spot,(ugly:1.2),(duplicate:1.2),(morbid:1.21),(mutilated:1.2),\
|
||||
(tranny:1.2),mutated hands,(poorly drawn hands:1.5),blurry,(bad anatomy:1.2),(bad proportions:1.3),\
|
||||
extra limbs,(disfigured:1.2),(missing arms:1.2),(extra legs:1.2),(fused fingers:1.5),\
|
||||
(too many fingers:1.5),(unclear eyes:1.2),lowers,bad hands,missing fingers,extra digit,\
|
||||
bad hands,missing fingers,(extra arms and legs),(worst quality:2),(low quality:2),\
|
||||
(normal quality:2),lowres,((monochrome)),((grayscale))
|
||||
"""
|
||||
|
||||
Stable Diffusion XL (SDXL) has two tokenizers and text encoders so its usage is a bit different. To address this, you should pass both tokenizers and encoders to the `Compel` class:
|
||||
```py
|
||||
from compel import Compel, ReturnedEmbeddingsType
|
||||
from diffusers import DiffusionPipeline
|
||||
from diffusers.utils import make_image_grid
|
||||
import torch
|
||||
|
||||
pipeline = DiffusionPipeline.from_pretrained(
|
||||
"stabilityai/stable-diffusion-xl-base-1.0",
|
||||
variant="fp16",
|
||||
use_safetensors=True,
|
||||
torch_dtype=torch.float16
|
||||
).to("cuda")
|
||||
|
||||
compel = Compel(
|
||||
tokenizer=[pipeline.tokenizer, pipeline.tokenizer_2] ,
|
||||
text_encoder=[pipeline.text_encoder, pipeline.text_encoder_2],
|
||||
returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
|
||||
requires_pooled=[False, True]
|
||||
(
    prompt_embeds,
    prompt_neg_embeds,
) = get_weighted_text_embeddings_sd15(
    pipe,
    prompt=prompt,
    neg_prompt=neg_prompt
)
|
||||
```
|
||||
|
||||
This time, let's upweight "ball" by a factor of 1.5 for the first prompt, and downweight "ball" by 0.6 for the second prompt. The [`StableDiffusionXLPipeline`] also requires [`pooled_prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLInpaintPipeline.__call__.pooled_prompt_embeds) (and optionally [`negative_pooled_prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLInpaintPipeline.__call__.negative_pooled_prompt_embeds)) so you should pass those to the pipeline along with the conditioning tensors:
|
||||
|
||||
```py
|
||||
# apply weights
|
||||
prompt = ["a red cat playing with a (ball)1.5", "a red cat playing with a (ball)0.6"]
|
||||
conditioning, pooled = compel(prompt)
|
||||
|
||||
# generate image
|
||||
generator = [torch.Generator().manual_seed(33) for _ in range(len(prompt))]
|
||||
images = pipeline(prompt_embeds=conditioning, pooled_prompt_embeds=pooled, generator=generator, num_inference_steps=30).images
|
||||
make_image_grid(images, rows=1, cols=2)
|
||||
```
|
||||
|
||||
<div class="flex gap-4">
|
||||
<div>
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/sdxl_ball1.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">"a red cat playing with a (ball)1.5"</figcaption>
|
||||
</div>
|
||||
<div>
|
||||
<img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/sdxl_ball2.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">"a red cat playing with a (ball)0.6"</figcaption>
|
||||
</div>
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sd_embed_dreambooth.png"/>
|
||||
</div>
|
||||
|
||||
@@ -106,7 +106,7 @@ Let's try it out!
|
||||
|
||||
## Deconstruct the Stable Diffusion pipeline
|
||||
|
||||
Stable Diffusion is a text-to-image *latent diffusion* model. It is called a latent diffusion model because it works with a lower-dimensional representation of the image instead of the actual pixel space, which makes it more memory efficient. The encoder compresses the image into a smaller representation, and a decoder to convert the compressed representation back into an image. For text-to-image models, you'll need a tokenizer and an encoder to generate text embeddings. From the previous example, you already know you need a UNet model and a scheduler.
|
||||
Stable Diffusion is a text-to-image *latent diffusion* model. It is called a latent diffusion model because it works with a lower-dimensional representation of the image instead of the actual pixel space, which makes it more memory efficient. The encoder compresses the image into a smaller representation, and a decoder converts the compressed representation back into an image. For text-to-image models, you'll need a tokenizer and an encoder to generate text embeddings. From the previous example, you already know you need a UNet model and a scheduler.
|
||||
|
||||
As you can see, this is already more complex than the DDPM pipeline which only contains a UNet model. The Stable Diffusion model has three separate pretrained models.
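
As a concrete sketch of what those separate components look like, the snippet below loads each pretrained piece individually, assuming the `CompVis/stable-diffusion-v1-4` checkpoint used elsewhere in this guide:

```py
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler

# VAE (encoder/decoder), text tokenizer + encoder, UNet, and scheduler.
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
tokenizer = CLIPTokenizer.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="text_encoder")
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
scheduler = PNDMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
```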
|
||||
|
||||
|
||||
@@ -40,9 +40,9 @@ Training examples show how to pretrain or fine-tune diffusion models for a varie
|
||||
| [**Text-to-Image fine-tuning**](./text_to_image) | ✅ | ✅ |
|
||||
| [**Textual Inversion**](./textual_inversion) | ✅ | - | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb)
|
||||
| [**Dreambooth**](./dreambooth) | ✅ | - | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb)
|
||||
| [**ControlNet**](./controlnet) | ✅ | ✅ | -
|
||||
| [**InstructPix2Pix**](./instruct_pix2pix) | ✅ | ✅ | -
|
||||
| [**Reinforcement Learning for Control**](./reinforcement_learning) | - | - | coming soon.
|
||||
| [**ControlNet**](./controlnet) | ✅ | ✅ | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb)
|
||||
| [**InstructPix2Pix**](./instruct_pix2pix) | ✅ | ✅ | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/InstructPix2Pix_using_diffusers.ipynb)
|
||||
| [**Reinforcement Learning for Control**](./reinforcement_learning) | - | - | [Notebook1](https://github.com/huggingface/notebooks/blob/main/diffusers/reinforcement_learning_for_control.ipynb), [Notebook2](https://github.com/huggingface/notebooks/blob/main/diffusers/reinforcement_learning_with_diffusers.ipynb)
|
||||
|
||||
## Community
|
||||
|
||||
|
||||
@@ -24,32 +24,35 @@ Please also check out our [Community Scripts](https://github.com/huggingface/dif
|
||||
| Speech to Image | Using automatic-speech-recognition to transcribe text and Stable Diffusion to generate images | [Speech to Image](#speech-to-image) |[Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/speech_to_image.ipynb) | [Mikail Duzenli](https://github.com/MikailINTech)
|
||||
| Wild Card Stable Diffusion | Stable Diffusion Pipeline that supports prompts that contain wildcard terms (indicated by surrounding double underscores), with values instantiated randomly from a corresponding txt file or a dictionary of possible values | [Wildcard Stable Diffusion](#wildcard-stable-diffusion) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/wildcard_stable_diffusion.ipynb) | [Shyam Sudhakaran](https://github.com/shyamsn97) |
|
||||
| [Composable Stable Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/) | Stable Diffusion Pipeline that supports prompts that contain "|" in prompts (as an AND condition) and weights (separated by "|" as well) to positively / negatively weight prompts. | [Composable Stable Diffusion](#composable-stable-diffusion) | - | [Mark Rich](https://github.com/MarkRich) |
|
||||
| Seed Resizing Stable Diffusion | Stable Diffusion Pipeline that supports resizing an image and retaining the concepts of the 512 by 512 generation. | [Seed Resizing](#seed-resizing) | - | [Mark Rich](https://github.com/MarkRich) |
|
||||
| Imagic Stable Diffusion | Stable Diffusion Pipeline that enables writing a text prompt to edit an existing image | [Imagic Stable Diffusion](#imagic-stable-diffusion) | - | [Mark Rich](https://github.com/MarkRich) |
|
||||
| Seed Resizing Stable Diffusion | Stable Diffusion Pipeline that supports resizing an image and retaining the concepts of the 512 by 512 generation. | [Seed Resizing](#seed-resizing) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/seed_resizing.ipynb) | [Mark Rich](https://github.com/MarkRich) |
|
||||
| Imagic Stable Diffusion | Stable Diffusion Pipeline that enables writing a text prompt to edit an existing image | [Imagic Stable Diffusion](#imagic-stable-diffusion) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/imagic_stable_diffusion.ipynb) | [Mark Rich](https://github.com/MarkRich) |
|
||||
| Multilingual Stable Diffusion | Stable Diffusion Pipeline that supports prompts in 50 different languages. | [Multilingual Stable Diffusion](#multilingual-stable-diffusion-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/multilingual_stable_diffusion.ipynb) | [Juan Carlos Piñeros](https://github.com/juancopi81) |
|
||||
| GlueGen Stable Diffusion | Stable Diffusion Pipeline that supports prompts in different languages using GlueGen adapter. | [GlueGen Stable Diffusion](#gluegen-stable-diffusion-pipeline) | - | [Phạm Hồng Vinh](https://github.com/rootonchair) |
|
||||
| GlueGen Stable Diffusion | Stable Diffusion Pipeline that supports prompts in different languages using GlueGen adapter. | [GlueGen Stable Diffusion](#gluegen-stable-diffusion-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/gluegen_stable_diffusion.ipynb) | [Phạm Hồng Vinh](https://github.com/rootonchair) |
|
||||
| Image to Image Inpainting Stable Diffusion | Stable Diffusion Pipeline that enables the overlaying of two images and subsequent inpainting | [Image to Image Inpainting Stable Diffusion](#image-to-image-inpainting-stable-diffusion) | - | [Alex McKinney](https://github.com/vvvm23) |
|
||||
| Text Based Inpainting Stable Diffusion | Stable Diffusion Inpainting Pipeline that enables passing a text prompt to generate the mask for inpainting | [Text Based Inpainting Stable Diffusion](#text-based-inpainting-stable-diffusion) | - | [Dhruv Karan](https://github.com/unography) |
|
||||
| Text Based Inpainting Stable Diffusion | Stable Diffusion Inpainting Pipeline that enables passing a text prompt to generate the mask for inpainting | [Text Based Inpainting Stable Diffusion](#text-based-inpainting-stable-diffusion) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/text_based_inpainting_stable_dffusion.ipynb) | [Dhruv Karan](https://github.com/unography) |
|
||||
| Bit Diffusion | Diffusion on discrete data | [Bit Diffusion](#bit-diffusion) | - | [Stuti R.](https://github.com/kingstut) |
|
||||
| K-Diffusion Stable Diffusion | Run Stable Diffusion with any of [K-Diffusion's samplers](https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/sampling.py) | [Stable Diffusion with K Diffusion](#stable-diffusion-with-k-diffusion) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
|
||||
| Checkpoint Merger Pipeline | Diffusion Pipeline that enables merging of saved model checkpoints | [Checkpoint Merger Pipeline](#checkpoint-merger-pipeline) | - | [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
|
||||
| Stable Diffusion v1.1-1.4 Comparison | Run all 4 model checkpoints for Stable Diffusion and compare their results together | [Stable Diffusion Comparison](#stable-diffusion-comparisons) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/stable_diffusion_comparison.ipynb) | [Suvaditya Mukherjee](https://github.com/suvadityamuk) |
|
||||
| MagicMix | Diffusion Pipeline for semantic mixing of an image and a text prompt | [MagicMix](#magic-mix) | - | [Partho Das](https://github.com/daspartho) |
|
||||
| MagicMix | Diffusion Pipeline for semantic mixing of an image and a text prompt | [MagicMix](#magic-mix) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/magic_mix.ipynb) | [Partho Das](https://github.com/daspartho) |
|
||||
| Stable UnCLIP | Diffusion Pipeline for combining prior model (generate clip image embedding from text, UnCLIPPipeline `"kakaobrain/karlo-v1-alpha"`) and decoder pipeline (decode clip image embedding to image, StableDiffusionImageVariationPipeline `"lambdalabs/sd-image-variations-diffusers"` ). | [Stable UnCLIP](#stable-unclip) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/stable_unclip.ipynb) | [Ray Wang](https://wrong.wang) |
|
||||
| UnCLIP Text Interpolation Pipeline | Diffusion Pipeline that allows passing two prompts and produces images while interpolating between the text-embeddings of the two prompts | [UnCLIP Text Interpolation Pipeline](#unclip-text-interpolation-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/unclip_text_interpolation.ipynb)| [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
|
||||
| UnCLIP Image Interpolation Pipeline | Diffusion Pipeline that allows passing two images/image_embeddings and produces images while interpolating between their image-embeddings | [UnCLIP Image Interpolation Pipeline](#unclip-image-interpolation-pipeline) | - | [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
|
||||
| UnCLIP Image Interpolation Pipeline | Diffusion Pipeline that allows passing two images/image_embeddings and produces images while interpolating between their image-embeddings | [UnCLIP Image Interpolation Pipeline](#unclip-image-interpolation-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/unclip_image_interpolation.ipynb)| [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
|
||||
| DDIM Noise Comparative Analysis Pipeline | Investigating how the diffusion models learn visual concepts from each noise level (which is a contribution of [P2 weighting (CVPR 2022)](https://arxiv.org/abs/2204.00227)) | [DDIM Noise Comparative Analysis Pipeline](#ddim-noise-comparative-analysis-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/ddim_noise_comparative_analysis.ipynb)| [Aengus (Duc-Anh)](https://github.com/aengusng8) |
|
||||
| CLIP Guided Img2Img Stable Diffusion Pipeline | Doing CLIP guidance for image to image generation with Stable Diffusion | [CLIP Guided Img2Img Stable Diffusion](#clip-guided-img2img-stable-diffusion) | - | [Nipun Jindal](https://github.com/nipunjindal/) |
|
||||
| CLIP Guided Img2Img Stable Diffusion Pipeline | Doing CLIP guidance for image to image generation with Stable Diffusion | [CLIP Guided Img2Img Stable Diffusion](#clip-guided-img2img-stable-diffusion) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/clip_guided_img2img_stable_diffusion.ipynb) | [Nipun Jindal](https://github.com/nipunjindal/) |
|
||||
| TensorRT Stable Diffusion Text to Image Pipeline | Accelerates the Stable Diffusion Text2Image Pipeline using TensorRT | [TensorRT Stable Diffusion Text to Image Pipeline](#tensorrt-text2image-stable-diffusion-pipeline) | - | [Asfiya Baig](https://github.com/asfiyab-nvidia) |
|
||||
| EDICT Image Editing Pipeline | Diffusion pipeline for text-guided image editing | [EDICT Image Editing Pipeline](#edict-image-editing-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/edict_image_pipeline.ipynb) | [Joqsan Azocar](https://github.com/Joqsan) |
|
||||
| Stable Diffusion RePaint | Stable Diffusion pipeline using [RePaint](https://arxiv.org/abs/2201.09865) for inpainting. | [Stable Diffusion RePaint](#stable-diffusion-repaint )|[Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/stable_diffusion_repaint.ipynb)| [Markus Pobitzer](https://github.com/Markus-Pobitzer) |
|
||||
| TensorRT Stable Diffusion Image to Image Pipeline | Accelerates the Stable Diffusion Image2Image Pipeline using TensorRT | [TensorRT Stable Diffusion Image to Image Pipeline](#tensorrt-image2image-stable-diffusion-pipeline) | - | [Asfiya Baig](https://github.com/asfiyab-nvidia) |
|
||||
| Stable Diffusion IPEX Pipeline | Accelerate Stable Diffusion inference pipeline with BF16/FP32 precision on Intel Xeon CPUs with [IPEX](https://github.com/intel/intel-extension-for-pytorch) | [Stable Diffusion on IPEX](#stable-diffusion-on-ipex) | - | [Yingjie Han](https://github.com/yingjie-han/) |
|
||||
| CLIP Guided Images Mixing Stable Diffusion Pipeline | Сombine images using usual diffusion models. | [CLIP Guided Images Mixing Using Stable Diffusion](#clip-guided-images-mixing-with-stable-diffusion) | - | [Karachev Denis](https://github.com/TheDenk) |
|
||||
| CLIP Guided Images Mixing Stable Diffusion Pipeline | Сombine images using usual diffusion models. | [CLIP Guided Images Mixing Using Stable Diffusion](#clip-guided-images-mixing-with-stable-diffusion) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/clip_guided_images_mixing_with_stable_diffusion.ipynb) | [Karachev Denis](https://github.com/TheDenk) |
|
||||
| TensorRT Stable Diffusion Inpainting Pipeline | Accelerates the Stable Diffusion Inpainting Pipeline using TensorRT | [TensorRT Stable Diffusion Inpainting Pipeline](#tensorrt-inpainting-stable-diffusion-pipeline) | - | [Asfiya Baig](https://github.com/asfiyab-nvidia) |
|
||||
| IADB Pipeline | Implementation of [Iterative α-(de)Blending: a Minimalist Deterministic Diffusion Model](https://arxiv.org/abs/2305.03486) | [IADB Pipeline](#iadb-pipeline) | - | [Thomas Chambon](https://github.com/tchambon)
|
||||
| Zero1to3 Pipeline | Implementation of [Zero-1-to-3: Zero-shot One Image to 3D Object](https://arxiv.org/abs/2303.11328) | [Zero1to3 Pipeline](#zero1to3-pipeline) | - | [Xin Kong](https://github.com/kxhit) |
|
||||
| Stable Diffusion XL Long Weighted Prompt Pipeline | A pipeline support unlimited length of prompt and negative prompt, use A1111 style of prompt weighting | [Stable Diffusion XL Long Weighted Prompt Pipeline](#stable-diffusion-xl-long-weighted-prompt-pipeline) | [](https://colab.research.google.com/drive/1LsqilswLR40XLLcp6XFOl5nKb_wOe26W?usp=sharing) | [Andrew Zhu](https://xhinker.medium.com/) |
|
||||
| Stable Diffusion Mixture Tiling Pipeline SD 1.5 | A pipeline generates cohesive images by integrating multiple diffusion processes, each focused on a specific image region and considering boundary effects for smooth blending | [Stable Diffusion Mixture Tiling Pipeline SD 1.5](#stable-diffusion-mixture-tiling-pipeline-sd-15) | [](https://huggingface.co/spaces/albarji/mixture-of-diffusers) | [Álvaro B Jiménez](https://github.com/albarji/) |
|
||||
| Stable Diffusion Mixture Canvas Pipeline SD 1.5 | A pipeline generates cohesive images by integrating multiple diffusion processes, each focused on a specific image region and considering boundary effects for smooth blending. Works by defining a list of Text2Image region objects that detail the region of influence of each diffuser. | [Stable Diffusion Mixture Canvas Pipeline SD 1.5](#stable-diffusion-mixture-canvas-pipeline-sd-15) | [](https://huggingface.co/spaces/albarji/mixture-of-diffusers) | [Álvaro B Jiménez](https://github.com/albarji/) |
|
||||
| Stable Diffusion Mixture Tiling Pipeline SDXL | A pipeline generates cohesive images by integrating multiple diffusion processes, each focused on a specific image region and considering boundary effects for smooth blending | [Stable Diffusion Mixture Tiling Pipeline SDXL](#stable-diffusion-mixture-tiling-pipeline-sdxl) | [](https://huggingface.co/spaces/elismasilva/mixture-of-diffusers-sdxl-tiling) | [Eliseu Silva](https://github.com/DEVAIEXP/) |
|
||||
| FABRIC - Stable Diffusion with feedback Pipeline | pipeline supports feedback from liked and disliked images | [Stable Diffusion Fabric Pipeline](#stable-diffusion-fabric-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/stable_diffusion_fabric.ipynb)| [Shauray Singh](https://shauray8.github.io/about_shauray/) |
|
||||
| sketch inpaint - Inpainting with non-inpaint Stable Diffusion | sketch inpaint much like in automatic1111 | [Masked Im2Im Stable Diffusion Pipeline](#stable-diffusion-masked-im2im) | - | [Anatoly Belikov](https://github.com/noskill) |
|
||||
| sketch inpaint xl - Inpainting with non-inpaint Stable Diffusion | sketch inpaint much like in automatic1111 | [Masked Im2Im Stable Diffusion XL Pipeline](#stable-diffusion-xl-masked-im2im) | - | [Anatoly Belikov](https://github.com/noskill) |
|
||||
@@ -57,7 +60,7 @@ Please also check out our [Community Scripts](https://github.com/huggingface/dif
|
||||
| Latent Consistency Pipeline | Implementation of [Latent Consistency Models: Synthesizing High-Resolution Images with Few-Step Inference](https://arxiv.org/abs/2310.04378) | [Latent Consistency Pipeline](#latent-consistency-pipeline) | - | [Simian Luo](https://github.com/luosiallen) |
|
||||
| Latent Consistency Img2img Pipeline | Img2img pipeline for Latent Consistency Models | [Latent Consistency Img2Img Pipeline](#latent-consistency-img2img-pipeline) | - | [Logan Zoellner](https://github.com/nagolinc) |
|
||||
| Latent Consistency Interpolation Pipeline | Interpolate the latent space of Latent Consistency Models with multiple prompts | [Latent Consistency Interpolation Pipeline](#latent-consistency-interpolation-pipeline) | [](https://colab.research.google.com/drive/1pK3NrLWJSiJsBynLns1K1-IDTW9zbPvl?usp=sharing) | [Aryan V S](https://github.com/a-r-r-o-w) |
|
||||
| SDE Drag Pipeline | The pipeline supports drag editing of images using stochastic differential equations | [SDE Drag Pipeline](#sde-drag-pipeline) | - | [NieShen](https://github.com/NieShenRuc) [Fengqi Zhu](https://github.com/Monohydroxides) |
|
||||
| SDE Drag Pipeline | The pipeline supports drag editing of images using stochastic differential equations | [SDE Drag Pipeline](#sde-drag-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/sde_drag.ipynb) | [NieShen](https://github.com/NieShenRuc) [Fengqi Zhu](https://github.com/Monohydroxides) |
|
||||
| Regional Prompting Pipeline | Assign multiple prompts for different regions | [Regional Prompting Pipeline](#regional-prompting-pipeline) | - | [hako-mikan](https://github.com/hako-mikan) |
|
||||
| LDM3D-sr (LDM3D upscaler) | Upscale low resolution RGB and depth inputs to high resolution | [StableDiffusionUpscaleLDM3D Pipeline](https://github.com/estelleafl/diffusers/tree/ldm3d_upscaler_community/examples/community#stablediffusionupscaleldm3d-pipeline) | - | [Estelle Aflalo](https://github.com/estelleafl) |
|
||||
| AnimateDiff ControlNet Pipeline | Combines AnimateDiff with precise motion control using ControlNets | [AnimateDiff ControlNet Pipeline](#animatediff-controlnet-pipeline) | [](https://colab.research.google.com/drive/1SKboYeGjEQmQPWoFC0aLYpBlYdHXkvAu?usp=sharing) | [Aryan V S](https://github.com/a-r-r-o-w) and [Edoardo Botta](https://github.com/EdoardoBotta) |
|
||||
@@ -78,6 +81,7 @@ PIXART-α Controlnet pipeline | Implementation of the controlnet model for pixar
|
||||
| HunyuanDiT Differential Diffusion Pipeline | Applies [Differential Diffusion](https://github.com/exx8/differential-diffusion) to [HunyuanDiT](https://github.com/huggingface/diffusers/pull/8240). | [HunyuanDiT with Differential Diffusion](#hunyuandit-with-differential-diffusion) | [](https://colab.research.google.com/drive/1v44a5fpzyr4Ffr4v2XBQ7BajzG874N4P?usp=sharing) | [Monjoy Choudhury](https://github.com/MnCSSJ4x) |
|
||||
| [🪆Matryoshka Diffusion Models](https://huggingface.co/papers/2310.15111) | A diffusion process that denoises inputs at multiple resolutions jointly and uses a NestedUNet architecture where features and parameters for small scale inputs are nested within those of the large scales. See [original codebase](https://github.com/apple/ml-mdm). | [🪆Matryoshka Diffusion Models](#matryoshka-diffusion-models) | [](https://huggingface.co/spaces/pcuenq/mdm) [](https://colab.research.google.com/gist/tolgacangoz/1f54875fc7aeaabcf284ebde64820966/matryoshka_hf.ipynb) | [M. Tolga Cangöz](https://github.com/tolgacangoz) |
|
||||
| Stable Diffusion XL Attentive Eraser Pipeline |[[AAAI2025 Oral] Attentive Eraser](https://github.com/Anonym0u3/AttentiveEraser) is a novel tuning-free method that enhances object removal capabilities in pre-trained diffusion models.|[Stable Diffusion XL Attentive Eraser Pipeline](#stable-diffusion-xl-attentive-eraser-pipeline)|-|[Wenhao Sun](https://github.com/Anonym0u3) and [Benlei Cui](https://github.com/Benny079)|
|
||||
| Perturbed-Attention Guidance |StableDiffusionPAGPipeline is a modification of StableDiffusionPipeline to support Perturbed-Attention Guidance (PAG).|[Perturbed-Attention Guidance](#perturbed-attention-guidance)|[Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/perturbed_attention_guidance.ipynb)|[Hyoungwon Cho](https://github.com/HyoungwonCho)|
|
||||
|
||||
To load a custom pipeline, pass the `custom_pipeline` argument to `DiffusionPipeline` with the name of one of the files in `diffusers/examples/community`. Feel free to send a PR with your own pipelines; we will merge them quickly.
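
For instance, here is a minimal sketch of loading a community pipeline by file name; the `lpw_stable_diffusion` pipeline is used only as an example, and any file in `diffusers/examples/community` follows the same pattern (some pipelines require extra components, as shown in their sections below):

```py
import torch
from diffusers import DiffusionPipeline

# "custom_pipeline" takes the name of a file in diffusers/examples/community.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="lpw_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")
```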
|
||||
@@ -948,10 +952,15 @@ image.save('./imagic/imagic_image_alpha_2.png')
|
||||
Test seed resizing. First generate an image at 512 by 512, then generate an image with the same seed at 512 by 592 using seed resizing. Finally, generate a 512 by 592 image using the original Stable Diffusion pipeline.
|
||||
```python
|
||||
import os
|
||||
import torch as th
|
||||
import numpy as np
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
# Ensure the save directory exists or create it
|
||||
save_dir = './seed_resize/'
|
||||
os.makedirs(save_dir, exist_ok=True)
|
||||
|
||||
has_cuda = th.cuda.is_available()
|
||||
device = th.device('cpu' if not has_cuda else 'cuda')
|
||||
|
||||
@@ -965,7 +974,6 @@ def dummy(images, **kwargs):
|
||||
|
||||
pipe.safety_checker = dummy
|
||||
|
||||
|
||||
images = []
|
||||
th.manual_seed(0)
|
||||
generator = th.Generator("cuda").manual_seed(0)
|
||||
@@ -984,15 +992,14 @@ res = pipe(
|
||||
width=width,
|
||||
generator=generator)
|
||||
image = res.images[0]
|
||||
image.save('./seed_resize/seed_resize_{w}_{h}_image.png'.format(w=width, h=height))
|
||||
|
||||
image.save(os.path.join(save_dir, 'seed_resize_{w}_{h}_image.png'.format(w=width, h=height)))
|
||||
|
||||
th.manual_seed(0)
|
||||
generator = th.Generator("cuda").manual_seed(0)
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained(
|
||||
"CompVis/stable-diffusion-v1-4",
|
||||
custom_pipeline="/home/mark/open_source/diffusers/examples/community/"
|
||||
custom_pipeline="seed_resize_stable_diffusion"
|
||||
).to(device)
|
||||
|
||||
width = 512
|
||||
@@ -1006,11 +1013,11 @@ res = pipe(
|
||||
width=width,
|
||||
generator=generator)
|
||||
image = res.images[0]
|
||||
image.save('./seed_resize/seed_resize_{w}_{h}_image.png'.format(w=width, h=height))
|
||||
image.save(os.path.join(save_dir, 'seed_resize_{w}_{h}_image.png'.format(w=width, h=height)))
|
||||
|
||||
pipe_compare = DiffusionPipeline.from_pretrained(
|
||||
"CompVis/stable-diffusion-v1-4",
|
||||
custom_pipeline="/home/mark/open_source/diffusers/examples/community/"
|
||||
custom_pipeline="seed_resize_stable_diffusion"
|
||||
).to(device)
|
||||
|
||||
res = pipe_compare(
|
||||
@@ -1023,7 +1030,7 @@ res = pipe_compare(
|
||||
)
|
||||
|
||||
image = res.images[0]
|
||||
image.save('./seed_resize/seed_resize_{w}_{h}_image_compare.png'.format(w=width, h=height))
|
||||
image.save(os.path.join(save_dir, 'seed_resize_{w}_{h}_image_compare.png'.format(w=width, h=height)))
|
||||
```
|
||||
|
||||
### Multilingual Stable Diffusion Pipeline
|
||||
@@ -1100,38 +1107,100 @@ GlueGen is a minimal adapter that allows alignment between any encoder (Text Enc
|
||||
Make sure you download `gluenet_French_clip_overnorm_over3_noln.ckpt` for French (there are also pre-trained weights for Chinese, Italian, Japanese, and Spanish, or you can train your own) from [GlueGen's official repo](https://github.com/salesforce/GlueGen/tree/main).
|
||||
```python
|
||||
from PIL import Image
|
||||
|
||||
import os
|
||||
import gc
|
||||
import urllib.request
|
||||
import torch
|
||||
|
||||
from transformers import AutoModel, AutoTokenizer
|
||||
|
||||
from transformers import XLMRobertaTokenizer, XLMRobertaForMaskedLM, CLIPTokenizer, CLIPTextModel
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
if __name__ == "__main__":
|
||||
device = "cuda"
|
||||
# Download checkpoints
|
||||
CHECKPOINTS = [
|
||||
"https://storage.googleapis.com/sfr-gluegen-data-research/checkpoints_all/gluenet_checkpoint/gluenet_Chinese_clip_overnorm_over3_noln.ckpt",
|
||||
"https://storage.googleapis.com/sfr-gluegen-data-research/checkpoints_all/gluenet_checkpoint/gluenet_French_clip_overnorm_over3_noln.ckpt",
|
||||
"https://storage.googleapis.com/sfr-gluegen-data-research/checkpoints_all/gluenet_checkpoint/gluenet_Italian_clip_overnorm_over3_noln.ckpt",
|
||||
"https://storage.googleapis.com/sfr-gluegen-data-research/checkpoints_all/gluenet_checkpoint/gluenet_Japanese_clip_overnorm_over3_noln.ckpt",
|
||||
"https://storage.googleapis.com/sfr-gluegen-data-research/checkpoints_all/gluenet_checkpoint/gluenet_Spanish_clip_overnorm_over3_noln.ckpt",
|
||||
"https://storage.googleapis.com/sfr-gluegen-data-research/checkpoints_all/gluenet_checkpoint/gluenet_sound2img_audioclip_us8k.ckpt"
|
||||
]
|
||||
|
||||
lm_model_id = "xlm-roberta-large"
|
||||
token_max_length = 77
|
||||
LANGUAGE_PROMPTS = {
|
||||
"French": "une voiture sur la plage",
|
||||
#"Chinese": "海滩上的一辆车",
|
||||
#"Italian": "una macchina sulla spiaggia",
|
||||
#"Japanese": "浜辺の車",
|
||||
#"Spanish": "un coche en la playa"
|
||||
}
|
||||
|
||||
text_encoder = AutoModel.from_pretrained(lm_model_id)
|
||||
tokenizer = AutoTokenizer.from_pretrained(lm_model_id, model_max_length=token_max_length, use_fast=False)
|
||||
def download_checkpoints(checkpoint_dir):
|
||||
os.makedirs(checkpoint_dir, exist_ok=True)
|
||||
for url in CHECKPOINTS:
|
||||
filename = os.path.join(checkpoint_dir, os.path.basename(url))
|
||||
if not os.path.exists(filename):
|
||||
print(f"Downloading {filename}...")
|
||||
urllib.request.urlretrieve(url, filename)
|
||||
print(f"Downloaded {filename}")
|
||||
else:
|
||||
print(f"Checkpoint {filename} already exists, skipping download.")
|
||||
return checkpoint_dir
|
||||
|
||||
tensor_norm = torch.Tensor([[43.8203],[28.3668],[27.9345],[28.0084],[28.2958],[28.2576],[28.3373],[28.2695],[28.4097],[28.2790],[28.2825],[28.2807],[28.2775],[28.2708],[28.2682],[28.2624],[28.2589],[28.2611],[28.2616],[28.2639],[28.2613],[28.2566],[28.2615],[28.2665],[28.2799],[28.2885],[28.2852],[28.2863],[28.2780],[28.2818],[28.2764],[28.2532],[28.2412],[28.2336],[28.2514],[28.2734],[28.2763],[28.2977],[28.2971],[28.2948],[28.2818],[28.2676],[28.2831],[28.2890],[28.2979],[28.2999],[28.3117],[28.3363],[28.3554],[28.3626],[28.3589],[28.3597],[28.3543],[28.3660],[28.3731],[28.3717],[28.3812],[28.3753],[28.3810],[28.3777],[28.3693],[28.3713],[28.3670],[28.3691],[28.3679],[28.3624],[28.3703],[28.3703],[28.3720],[28.3594],[28.3576],[28.3562],[28.3438],[28.3376],[28.3389],[28.3433],[28.3191]])
|
||||
def load_checkpoint(pipeline, checkpoint_path, device):
|
||||
state_dict = torch.load(checkpoint_path, map_location=device)
|
||||
state_dict = state_dict.get("state_dict", state_dict)
|
||||
missing_keys, unexpected_keys = pipeline.unet.load_state_dict(state_dict, strict=False)
|
||||
return pipeline
|
||||
|
||||
pipeline = DiffusionPipeline.from_pretrained(
|
||||
"stable-diffusion-v1-5/stable-diffusion-v1-5",
|
||||
text_encoder=text_encoder,
|
||||
tokenizer=tokenizer,
|
||||
custom_pipeline="gluegen"
|
||||
).to(device)
|
||||
pipeline.load_language_adapter("gluenet_French_clip_overnorm_over3_noln.ckpt", num_token=token_max_length, dim=1024, dim_out=768, tensor_norm=tensor_norm)
|
||||
def generate_image(pipeline, prompt, device, output_path):
|
||||
with torch.inference_mode():
|
||||
image = pipeline(
|
||||
prompt,
|
||||
generator=torch.Generator(device=device).manual_seed(42),
|
||||
num_inference_steps=50
|
||||
).images[0]
|
||||
image.save(output_path)
|
||||
print(f"Image saved to {output_path}")
|
||||
|
||||
prompt = "une voiture sur la plage"
|
||||
checkpoint_dir = download_checkpoints("./checkpoints_all/gluenet_checkpoint")
|
||||
device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
print(f"Using device: {device}")
|
||||
|
||||
generator = torch.Generator(device=device).manual_seed(42)
|
||||
image = pipeline(prompt, generator=generator).images[0]
|
||||
image.save("gluegen_output_fr.png")
|
||||
tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base", use_fast=False)
|
||||
model = XLMRobertaForMaskedLM.from_pretrained("xlm-roberta-base").to(device)
|
||||
inputs = tokenizer("Ceci est une phrase incomplète avec un [MASK].", return_tensors="pt").to(device)
|
||||
with torch.inference_mode():
|
||||
_ = model(**inputs)
|
||||
|
||||
|
||||
clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
|
||||
clip_text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").to(device)
|
||||
|
||||
# Initialize pipeline
|
||||
pipeline = DiffusionPipeline.from_pretrained(
|
||||
"stable-diffusion-v1-5/stable-diffusion-v1-5",
|
||||
text_encoder=clip_text_encoder,
|
||||
tokenizer=clip_tokenizer,
|
||||
custom_pipeline="gluegen",
|
||||
safety_checker=None
|
||||
).to(device)
|
||||
|
||||
os.makedirs("outputs", exist_ok=True)
|
||||
|
||||
# Generate images
|
||||
for language, prompt in LANGUAGE_PROMPTS.items():
|
||||
|
||||
checkpoint_file = f"gluenet_{language}_clip_overnorm_over3_noln.ckpt"
|
||||
checkpoint_path = os.path.join(checkpoint_dir, checkpoint_file)
|
||||
try:
|
||||
pipeline = load_checkpoint(pipeline, checkpoint_path, device)
|
||||
output_path = f"outputs/gluegen_output_{language.lower()}.png"
|
||||
generate_image(pipeline, prompt, device, output_path)
|
||||
except Exception as e:
|
||||
print(f"Error processing {language} model: {e}")
|
||||
continue
|
||||
|
||||
if torch.cuda.is_available():
|
||||
torch.cuda.empty_cache()
|
||||
gc.collect()
|
||||
```
|
||||
|
||||
Which will produce:
|
||||
@@ -1182,28 +1251,49 @@ Currently uses the CLIPSeg model for mask generation, then calls the standard St
|
||||
```python
|
||||
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
from PIL import Image
|
||||
import requests
|
||||
import torch
|
||||
|
||||
# Load CLIPSeg model and processor
|
||||
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
|
||||
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
|
||||
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined").to("cuda")
|
||||
|
||||
# Load Stable Diffusion Inpainting Pipeline with custom pipeline
|
||||
pipe = DiffusionPipeline.from_pretrained(
|
||||
"runwayml/stable-diffusion-inpainting",
|
||||
custom_pipeline="text_inpainting",
|
||||
segmentation_model=model,
|
||||
segmentation_processor=processor
|
||||
)
|
||||
pipe = pipe.to("cuda")
|
||||
|
||||
).to("cuda")
|
||||
|
||||
# Load input image
|
||||
url = "https://github.com/timojl/clipseg/blob/master/example_image.jpg?raw=true"
|
||||
image = Image.open(requests.get(url, stream=True).raw).resize((512, 512))
|
||||
text = "a glass" # will mask out this text
|
||||
prompt = "a cup" # the masked out region will be replaced with this
|
||||
image = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
image = pipe(image=image, text=text, prompt=prompt).images[0]
|
||||
# Step 1: Resize input image for CLIPSeg (224x224)
|
||||
segmentation_input = image.resize((224, 224))
|
||||
|
||||
# Step 2: Generate segmentation mask
|
||||
text = "a glass" # Object to mask
|
||||
inputs = processor(text=text, images=segmentation_input, return_tensors="pt").to("cuda")
|
||||
|
||||
with torch.no_grad():
|
||||
mask = model(**inputs).logits.sigmoid() # Get segmentation mask
|
||||
|
||||
# Resize mask back to 512x512 for SD inpainting
|
||||
mask = torch.nn.functional.interpolate(mask.unsqueeze(0), size=(512, 512), mode="bilinear").squeeze(0)
|
||||
|
||||
# Step 3: Resize input image for Stable Diffusion
|
||||
image = image.resize((512, 512))
|
||||
|
||||
# Step 4: Run inpainting with Stable Diffusion
|
||||
prompt = "a cup" # The masked-out region will be replaced with this
|
||||
result = pipe(image=image, mask=mask, prompt=prompt,text=text).images[0]
|
||||
|
||||
# Save output
|
||||
result.save("inpainting_output.png")
|
||||
print("Inpainting completed. Image saved as 'inpainting_output.png'.")
|
||||
```
|
||||
|
||||
### Bit Diffusion
|
||||
@@ -1379,8 +1469,10 @@ There are 3 parameters for the method-
|
||||
Here is an example usage-
|
||||
|
||||
```python
|
||||
import requests
|
||||
from diffusers import DiffusionPipeline, DDIMScheduler
|
||||
from PIL import Image
|
||||
from io import BytesIO
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained(
|
||||
"CompVis/stable-diffusion-v1-4",
|
||||
@@ -1388,9 +1480,11 @@ pipe = DiffusionPipeline.from_pretrained(
|
||||
scheduler=DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler"),
|
||||
).to('cuda')
|
||||
|
||||
img = Image.open('phone.jpg')
|
||||
url = "https://user-images.githubusercontent.com/59410571/209578593-141467c7-d831-4792-8b9a-b17dc5e47816.jpg"
|
||||
response = requests.get(url)
|
||||
image = Image.open(BytesIO(response.content)).convert("RGB") # Convert to RGB to avoid issues
|
||||
mix_img = pipe(
|
||||
img,
|
||||
image,
|
||||
prompt='bed',
|
||||
kmin=0.3,
|
||||
kmax=0.5,
|
||||
@@ -1543,6 +1637,8 @@ This Diffusion Pipeline takes two images or an image_embeddings tensor of size 2
|
||||
import torch
|
||||
from diffusers import DiffusionPipeline
|
||||
from PIL import Image
|
||||
import requests
|
||||
from io import BytesIO
|
||||
|
||||
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
|
||||
dtype = torch.float16 if torch.cuda.is_available() else torch.bfloat16
|
||||
@@ -1554,13 +1650,25 @@ pipe = DiffusionPipeline.from_pretrained(
|
||||
)
|
||||
pipe.to(device)
|
||||
|
||||
images = [Image.open('./starry_night.jpg'), Image.open('./flowers.jpg')]
|
||||
# List of image URLs
|
||||
image_urls = [
|
||||
'https://camo.githubusercontent.com/ef13c8059b12947c0d5e8d3ea88900de6bf1cd76bbf61ace3928e824c491290e/68747470733a2f2f68756767696e67666163652e636f2f64617461736574732f4e616761536169416268696e61792f556e434c4950496d616765496e746572706f6c6174696f6e53616d706c65732f7265736f6c76652f6d61696e2f7374617272795f6e696768742e6a7067',
|
||||
'https://camo.githubusercontent.com/d1947ab7c49ae3f550c28409d5e8b120df48e456559cf4557306c0848337702c/68747470733a2f2f68756767696e67666163652e636f2f64617461736574732f4e616761536169416268696e61792f556e434c4950496d616765496e746572706f6c6174696f6e53616d706c65732f7265736f6c76652f6d61696e2f666c6f776572732e6a7067'
|
||||
]
|
||||
|
||||
# Open images from URLs
|
||||
images = []
|
||||
for url in image_urls:
|
||||
response = requests.get(url)
|
||||
img = Image.open(BytesIO(response.content))
|
||||
images.append(img)
|
||||
|
||||
# For best results keep the prompts close in length to each other. Of course, feel free to try out with differing lengths.
|
||||
generator = torch.Generator(device=device).manual_seed(42)
|
||||
|
||||
output = pipe(image=images, steps=6, generator=generator)
|
||||
|
||||
for i,image in enumerate(output.images):
|
||||
for i, image in enumerate(output.images):
|
||||
image.save('starry_to_flowers_%s.jpg' % i)
|
||||
```
|
||||
|
||||
@@ -1637,37 +1745,51 @@ from diffusers import DiffusionPipeline
|
||||
from PIL import Image
|
||||
from transformers import CLIPImageProcessor, CLIPModel
|
||||
|
||||
# Load CLIP model and feature extractor
|
||||
feature_extractor = CLIPImageProcessor.from_pretrained(
|
||||
"laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
|
||||
)
|
||||
clip_model = CLIPModel.from_pretrained(
|
||||
"laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
|
||||
)
|
||||
|
||||
# Load guided pipeline
|
||||
guided_pipeline = DiffusionPipeline.from_pretrained(
|
||||
"CompVis/stable-diffusion-v1-4",
|
||||
# custom_pipeline="clip_guided_stable_diffusion",
|
||||
custom_pipeline="/home/njindal/diffusers/examples/community/clip_guided_stable_diffusion.py",
|
||||
custom_pipeline="clip_guided_stable_diffusion_img2img",
|
||||
clip_model=clip_model,
|
||||
feature_extractor=feature_extractor,
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
guided_pipeline.enable_attention_slicing()
|
||||
guided_pipeline = guided_pipeline.to("cuda")
|
||||
|
||||
# Define prompt and fetch image
|
||||
prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece"
|
||||
url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
|
||||
response = requests.get(url)
|
||||
init_image = Image.open(BytesIO(response.content)).convert("RGB")
|
||||
edit_image = Image.open(BytesIO(response.content)).convert("RGB")
|
||||
|
||||
# Run the pipeline
|
||||
image = guided_pipeline(
|
||||
prompt=prompt,
|
||||
num_inference_steps=30,
|
||||
image=init_image,
|
||||
strength=0.75,
|
||||
guidance_scale=7.5,
|
||||
clip_guidance_scale=100,
|
||||
num_cutouts=4,
|
||||
use_cutouts=False,
|
||||
height=512, # Height of the output image
|
||||
width=512, # Width of the output image
|
||||
image=edit_image, # Input image to guide the diffusion
|
||||
strength=0.75, # How much to transform the input image
|
||||
num_inference_steps=30, # Number of diffusion steps
|
||||
guidance_scale=7.5, # Scale of the classifier-free guidance
|
||||
clip_guidance_scale=100, # Scale of the CLIP guidance
|
||||
num_images_per_prompt=1, # Generate one image per prompt
|
||||
eta=0.0, # Noise scheduling parameter
|
||||
num_cutouts=4, # Number of cutouts for CLIP guidance
|
||||
use_cutouts=False, # Whether to use cutouts
|
||||
output_type="pil", # Output as PIL image
|
||||
).images[0]
|
||||
display(image)
|
||||
|
||||
# Display the generated image
|
||||
image.show()
|
||||
|
||||
```
|
||||
|
||||
Init Image
|
||||
@@ -2244,6 +2366,85 @@ CLIP guided stable diffusion images mixing pipeline allows to combine two images
|
||||
This approach uses an (optional) CoCa model to avoid writing an image description.
[More code examples](https://github.com/TheDenk/images_mixing)
|
||||
|
||||
### Example Images Mixing (with CoCa)
|
||||
|
||||
```python
|
||||
import PIL
|
||||
import torch
|
||||
import requests
|
||||
import open_clip
|
||||
from open_clip import SimpleTokenizer
|
||||
from io import BytesIO
|
||||
from diffusers import DiffusionPipeline
|
||||
from transformers import CLIPImageProcessor, CLIPModel
|
||||
|
||||
|
||||
def download_image(url):
|
||||
response = requests.get(url)
|
||||
return PIL.Image.open(BytesIO(response.content)).convert("RGB")
|
||||
|
||||
# Loading additional models
|
||||
feature_extractor = CLIPImageProcessor.from_pretrained(
|
||||
"laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
|
||||
)
|
||||
clip_model = CLIPModel.from_pretrained(
|
||||
"laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
|
||||
)
|
||||
coca_model = open_clip.create_model('coca_ViT-L-14', pretrained='laion2B-s13B-b90k').to('cuda')
|
||||
coca_model.dtype = torch.float16
|
||||
coca_transform = open_clip.image_transform(
|
||||
coca_model.visual.image_size,
|
||||
is_train=False,
|
||||
mean=getattr(coca_model.visual, 'image_mean', None),
|
||||
std=getattr(coca_model.visual, 'image_std', None),
|
||||
)
|
||||
coca_tokenizer = SimpleTokenizer()
|
||||
|
||||
# Pipeline creating
|
||||
mixing_pipeline = DiffusionPipeline.from_pretrained(
|
||||
"CompVis/stable-diffusion-v1-4",
|
||||
custom_pipeline="clip_guided_images_mixing_stable_diffusion",
|
||||
clip_model=clip_model,
|
||||
feature_extractor=feature_extractor,
|
||||
coca_model=coca_model,
|
||||
coca_tokenizer=coca_tokenizer,
|
||||
coca_transform=coca_transform,
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
mixing_pipeline.enable_attention_slicing()
|
||||
mixing_pipeline = mixing_pipeline.to("cuda")
|
||||
|
||||
# Pipeline running
|
||||
generator = torch.Generator(device="cuda").manual_seed(17)
|
||||
|
||||
def download_image(url):
|
||||
response = requests.get(url)
|
||||
return PIL.Image.open(BytesIO(response.content)).convert("RGB")
|
||||
|
||||
content_image = download_image("https://huggingface.co/datasets/TheDenk/images_mixing/resolve/main/boromir.jpg")
|
||||
style_image = download_image("https://huggingface.co/datasets/TheDenk/images_mixing/resolve/main/gigachad.jpg")
|
||||
|
||||
pipe_images = mixing_pipeline(
|
||||
num_inference_steps=50,
|
||||
content_image=content_image,
|
||||
style_image=style_image,
|
||||
noise_strength=0.65,
|
||||
slerp_latent_style_strength=0.9,
|
||||
slerp_prompt_style_strength=0.1,
|
||||
slerp_clip_image_style_strength=0.1,
|
||||
guidance_scale=9.0,
|
||||
batch_size=1,
|
||||
clip_guidance_scale=100,
|
||||
generator=generator,
|
||||
).images
|
||||
|
||||
output_path = "mixed_output.jpg"
|
||||
pipe_images[0].save(output_path)
|
||||
print(f"Image saved successfully at {output_path}")
|
||||
```
|
||||
|
||||

|
||||
|
||||
### Stable Diffusion XL Long Weighted Prompt Pipeline
|
||||
|
||||
This SDXL pipeline supports unlimited-length prompts and negative prompts, and is compatible with the A1111 prompt weighting style. A minimal usage sketch is shown below.
|
||||
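Since the original example is elided by the hunk below, here is a minimal, hedged usage sketch. The community pipeline id `lpw_stable_diffusion_xl`, the checkpoint, the prompts, and the weights are illustrative assumptions rather than the authoritative example; see the linked PR for the full version.

```python
import torch
from diffusers import DiffusionPipeline

# Community pipeline id assumed to be "lpw_stable_diffusion_xl" (long/weighted prompt SDXL).
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    custom_pipeline="lpw_stable_diffusion_xl",
    torch_dtype=torch.float16,
).to("cuda")

# A1111-style weighting such as "(white:1.2)"; appending `prompt2` pushes the prompt well past 77 tokens.
prompt = "photo of a cute (white:1.2) glowing dog on the grass, (highly detailed:1.3), masterpiece"
prompt2 = " professional photograph, golden hour, bokeh, 8k, award winning, trending on artstation" * 4
prompt = prompt + "," + prompt2
negative_prompt = "(low quality:1.4), blurry, deformed, watermark"

image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=30).images[0]
image.save("lwp_sdxl.png")
```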
@@ -2309,83 +2510,7 @@ In the above code, the `prompt2` is appended to the `prompt`, which is more than
|
||||
|
||||
For more results, checkout [PR #6114](https://github.com/huggingface/diffusers/pull/6114).
|
||||
|
||||
### Example Images Mixing (with CoCa)
|
||||
|
||||
```python
|
||||
import requests
|
||||
from io import BytesIO
|
||||
|
||||
import PIL
|
||||
import torch
|
||||
import open_clip
|
||||
from open_clip import SimpleTokenizer
|
||||
from diffusers import DiffusionPipeline
|
||||
from transformers import CLIPImageProcessor, CLIPModel
|
||||
|
||||
|
||||
def download_image(url):
|
||||
response = requests.get(url)
|
||||
return PIL.Image.open(BytesIO(response.content)).convert("RGB")
|
||||
|
||||
# Loading additional models
|
||||
feature_extractor = CLIPImageProcessor.from_pretrained(
|
||||
"laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
|
||||
)
|
||||
clip_model = CLIPModel.from_pretrained(
|
||||
"laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
|
||||
)
|
||||
coca_model = open_clip.create_model('coca_ViT-L-14', pretrained='laion2B-s13B-b90k').to('cuda')
|
||||
coca_model.dtype = torch.float16
|
||||
coca_transform = open_clip.image_transform(
|
||||
coca_model.visual.image_size,
|
||||
is_train=False,
|
||||
mean=getattr(coca_model.visual, 'image_mean', None),
|
||||
std=getattr(coca_model.visual, 'image_std', None),
|
||||
)
|
||||
coca_tokenizer = SimpleTokenizer()
|
||||
|
||||
# Pipeline creating
|
||||
mixing_pipeline = DiffusionPipeline.from_pretrained(
|
||||
"CompVis/stable-diffusion-v1-4",
|
||||
custom_pipeline="clip_guided_images_mixing_stable_diffusion",
|
||||
clip_model=clip_model,
|
||||
feature_extractor=feature_extractor,
|
||||
coca_model=coca_model,
|
||||
coca_tokenizer=coca_tokenizer,
|
||||
coca_transform=coca_transform,
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
mixing_pipeline.enable_attention_slicing()
|
||||
mixing_pipeline = mixing_pipeline.to("cuda")
|
||||
|
||||
# Pipeline running
|
||||
generator = torch.Generator(device="cuda").manual_seed(17)
|
||||
|
||||
def download_image(url):
|
||||
response = requests.get(url)
|
||||
return PIL.Image.open(BytesIO(response.content)).convert("RGB")
|
||||
|
||||
content_image = download_image("https://huggingface.co/datasets/TheDenk/images_mixing/resolve/main/boromir.jpg")
|
||||
style_image = download_image("https://huggingface.co/datasets/TheDenk/images_mixing/resolve/main/gigachad.jpg")
|
||||
|
||||
pipe_images = mixing_pipeline(
|
||||
num_inference_steps=50,
|
||||
content_image=content_image,
|
||||
style_image=style_image,
|
||||
noise_strength=0.65,
|
||||
slerp_latent_style_strength=0.9,
|
||||
slerp_prompt_style_strength=0.1,
|
||||
slerp_clip_image_style_strength=0.1,
|
||||
guidance_scale=9.0,
|
||||
batch_size=1,
|
||||
clip_guidance_scale=100,
|
||||
generator=generator,
|
||||
).images
|
||||
```
|
||||
|
||||

|
||||
|
||||
### Stable Diffusion Mixture Tiling
|
||||
### Stable Diffusion Mixture Tiling Pipeline SD 1.5
|
||||
|
||||
This pipeline uses the Mixture of Diffusers approach. Refer to the [Mixture of Diffusers](https://arxiv.org/abs/2302.02412) paper for more details. A minimal usage sketch follows.
|
||||
|
||||
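The SD 1.5 tiling code is mostly elided by the hunk below, so here is a minimal, hedged sketch. The community pipeline id `mixture_tiling`, the prompts, the tile sizes, and the `seed` argument are modeled on the SDXL example further down and are illustrative rather than authoritative.

```python
from diffusers import DiffusionPipeline, LMSDiscreteScheduler

# Scheduler and base model chosen to match the other mixture examples in this document.
scheduler = LMSDiscreteScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
)
pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    scheduler=scheduler,
    custom_pipeline="mixture_tiling",  # community pipeline id (assumed)
).to("cuda")

# One prompt per tile: a 1x3 grid of tiles blended into a single wide image.
image = pipeline(
    prompt=[[
        "A forest at dawn, trending on artstation",
        "A river crossing the forest, trending on artstation",
        "A mountain range behind the forest, trending on artstation",
    ]],
    tile_height=640,
    tile_width=640,
    tile_row_overlap=0,
    tile_col_overlap=256,
    guidance_scale=8,
    num_inference_steps=50,
    seed=1234,  # this community pipeline takes a `seed` argument (assumed)
)["images"][0]
image.save("mixture_tiling_sd15.png")
```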
@@ -2416,6 +2541,95 @@ image = pipeline(
|
||||
|
||||

|
||||
|
||||
### Stable Diffusion Mixture Canvas Pipeline SD 1.5
|
||||
|
||||
This pipeline uses the Mixture of Diffusers approach. Refer to the [Mixture of Diffusers](https://arxiv.org/abs/2302.02412) paper for more details.
|
||||
|
||||
```python
|
||||
from PIL import Image
|
||||
from diffusers import LMSDiscreteScheduler, DiffusionPipeline
|
||||
from diffusers.pipelines.pipeline_utils import Image2ImageRegion, Text2ImageRegion, preprocess_image
|
||||
|
||||
|
||||
# Load and preprocess guide image
|
||||
iic_image = preprocess_image(Image.open("input_image.png").convert("RGB"))
|
||||
|
||||
# Create scheduler and model (similar to StableDiffusionPipeline)
|
||||
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
|
||||
pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler).to("cuda:0", custom_pipeline="mixture_canvas")
|
||||
pipeline.to("cuda")
|
||||
|
||||
# Mixture of Diffusers generation
|
||||
output = pipeline(
|
||||
canvas_height=800,
|
||||
canvas_width=352,
|
||||
regions=[
|
||||
Text2ImageRegion(0, 800, 0, 352, guidance_scale=8,
|
||||
prompt=f"best quality, masterpiece, WLOP, sakimichan, art contest winner on pixiv, 8K, intricate details, wet effects, rain drops, ethereal, mysterious, futuristic, UHD, HDR, cinematic lighting, in a beautiful forest, rainy day, award winning, trending on artstation, beautiful confident cheerful young woman, wearing a futuristic sleeveless dress, ultra beautiful detailed eyes, hyper-detailed face, complex, perfect, model, textured, chiaroscuro, professional make-up, realistic, figure in frame, "),
|
||||
Image2ImageRegion(352-800, 352, 0, 352, reference_image=iic_image, strength=1.0),
|
||||
],
|
||||
num_inference_steps=100,
|
||||
seed=5525475061,
|
||||
)["images"][0]
|
||||
```
|
||||
|
||||

|
||||

|
||||
|
||||
### Stable Diffusion Mixture Tiling Pipeline SDXL
|
||||
|
||||
This pipeline uses the Mixture of Diffusers approach. Refer to the [Mixture of Diffusers](https://arxiv.org/abs/2302.02412) paper for more details.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler, AutoencoderKL
|
||||
|
||||
device="cuda"
|
||||
|
||||
# Load fixed vae (optional)
|
||||
vae = AutoencoderKL.from_pretrained(
|
||||
"madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
|
||||
).to(device)
|
||||
|
||||
# Create scheduler and model (similar to StableDiffusionPipeline)
|
||||
model_id="stablediffusionapi/yamermix-v8-vae"
|
||||
scheduler = DPMSolverMultistepScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
|
||||
pipe = DiffusionPipeline.from_pretrained(
|
||||
model_id,
|
||||
torch_dtype=torch.float16,
|
||||
vae=vae,
|
||||
custom_pipeline="mixture_tiling_sdxl",
|
||||
scheduler=scheduler,
|
||||
use_safetensors=False
|
||||
).to(device)
|
||||
|
||||
pipe.enable_model_cpu_offload()
|
||||
pipe.enable_vae_tiling()
|
||||
pipe.enable_vae_slicing()
|
||||
|
||||
generator = torch.Generator(device).manual_seed(297984183)
|
||||
|
||||
# Mixture of Diffusers generation
|
||||
image = pipe(
|
||||
prompt=[[
|
||||
"A charming house in the countryside, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
|
||||
"A dirt road in the countryside crossing pastures, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
|
||||
"An old and rusty giant robot lying on a dirt road, by jakub rozalski, dark sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece"
|
||||
]],
|
||||
tile_height=1024,
|
||||
tile_width=1280,
|
||||
tile_row_overlap=0,
|
||||
tile_col_overlap=256,
|
||||
guidance_scale_tiles=[[7, 7, 7]], # or guidance_scale=7 if it is the same for all prompts
|
||||
height=1024,
|
||||
width=3840,
|
||||
generator=generator,
|
||||
num_inference_steps=30,
|
||||
)["images"][0]
|
||||
```
|
||||
|
||||

|
||||
|
||||
### TensorRT Inpainting Stable Diffusion Pipeline
|
||||
|
||||
The TensorRT pipeline can be used to accelerate Stable Diffusion inpainting inference. A minimal loading sketch is shown below.
|
||||
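Because the setup code is elided by the hunk below (only the final call and save survive), here is a hedged loading sketch. The community pipeline id `stable_diffusion_tensorrt_inpaint` and the base checkpoint are assumptions; `prompt`, `input_image`, and `mask_image` are prepared in the elided part of the original example.

```python
import torch
from diffusers import DDIMScheduler, DiffusionPipeline

model_id = "stabilityai/stable-diffusion-2-inpainting"  # assumed base inpainting checkpoint
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")

# Community pipeline id assumed to be "stable_diffusion_tensorrt_inpaint";
# the first run builds the TensorRT engines, which can take a while.
pipe = DiffusionPipeline.from_pretrained(
    model_id,
    custom_pipeline="stable_diffusion_tensorrt_inpaint",
    scheduler=scheduler,
    torch_dtype=torch.float16,
).to("cuda")
```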
@@ -2458,41 +2672,6 @@ image = pipe(prompt, image=input_image, mask_image=mask_image, strength=0.75,).i
|
||||
image.save('tensorrt_inpaint_mecha_robot.png')
|
||||
```
|
||||
|
||||
### Stable Diffusion Mixture Canvas
|
||||
|
||||
This pipeline uses the Mixture. Refer to the [Mixture](https://arxiv.org/abs/2302.02412) paper for more details.
|
||||
|
||||
```python
|
||||
from PIL import Image
|
||||
from diffusers import LMSDiscreteScheduler, DiffusionPipeline
|
||||
from diffusers.pipelines.pipeline_utils import Image2ImageRegion, Text2ImageRegion, preprocess_image
|
||||
|
||||
|
||||
# Load and preprocess guide image
|
||||
iic_image = preprocess_image(Image.open("input_image.png").convert("RGB"))
|
||||
|
||||
# Create scheduler and model (similar to StableDiffusionPipeline)
|
||||
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
|
||||
pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler).to("cuda:0", custom_pipeline="mixture_canvas")
|
||||
pipeline.to("cuda")
|
||||
|
||||
# Mixture of Diffusers generation
|
||||
output = pipeline(
|
||||
canvas_height=800,
|
||||
canvas_width=352,
|
||||
regions=[
|
||||
Text2ImageRegion(0, 800, 0, 352, guidance_scale=8,
|
||||
prompt=f"best quality, masterpiece, WLOP, sakimichan, art contest winner on pixiv, 8K, intricate details, wet effects, rain drops, ethereal, mysterious, futuristic, UHD, HDR, cinematic lighting, in a beautiful forest, rainy day, award winning, trending on artstation, beautiful confident cheerful young woman, wearing a futuristic sleeveless dress, ultra beautiful detailed eyes, hyper-detailed face, complex, perfect, model, textured, chiaroscuro, professional make-up, realistic, figure in frame, "),
|
||||
Image2ImageRegion(352-800, 352, 0, 352, reference_image=iic_image, strength=1.0),
|
||||
],
|
||||
num_inference_steps=100,
|
||||
seed=5525475061,
|
||||
)["images"][0]
|
||||
```
|
||||
|
||||

|
||||

|
||||
|
||||
### IADB pipeline
|
||||
|
||||
This pipeline is the implementation of the [α-(de)Blending: a Minimalist Deterministic Diffusion Model](https://arxiv.org/abs/2305.03486) paper.
|
||||
@@ -3909,33 +4088,89 @@ This pipeline provides drag-and-drop image editing using stochastic differential
|
||||
See [paper](https://arxiv.org/abs/2311.01410), [paper page](https://ml-gsai.github.io/SDE-Drag-demo/), [original repo](https://github.com/ML-GSAI/SDE-Drag) for more information.
|
||||
|
||||
```py
|
||||
import PIL
|
||||
import torch
|
||||
from diffusers import DDIMScheduler, DiffusionPipeline
|
||||
from PIL import Image
|
||||
import requests
|
||||
from io import BytesIO
|
||||
import numpy as np
|
||||
|
||||
# Load the pipeline
|
||||
model_path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
||||
scheduler = DDIMScheduler.from_pretrained(model_path, subfolder="scheduler")
|
||||
pipe = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler, custom_pipeline="sde_drag")
|
||||
pipe.to('cuda')
|
||||
|
||||
# To save GPU memory, torch.float16 can be used, but it may compromise image quality.
|
||||
# If not training LoRA, please avoid using torch.float16
|
||||
# pipe.to(torch.float16)
|
||||
# Ensure the model is moved to the GPU
|
||||
device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
pipe.to(device)
|
||||
|
||||
# Provide prompt, image, mask image, and the starting and target points for drag editing.
|
||||
prompt = "prompt of the image"
|
||||
image = PIL.Image.open('/path/to/image')
|
||||
mask_image = PIL.Image.open('/path/to/mask_image')
|
||||
source_points = [[123, 456]]
|
||||
target_points = [[234, 567]]
|
||||
# Function to load image from URL
|
||||
def load_image_from_url(url):
|
||||
response = requests.get(url)
|
||||
return Image.open(BytesIO(response.content)).convert("RGB")
|
||||
|
||||
# train_lora is optional, and in most cases, using train_lora can better preserve consistency with the original image.
|
||||
pipe.train_lora(prompt, image)
|
||||
# Function to prepare mask
|
||||
def prepare_mask(mask_image):
|
||||
# Convert to grayscale
|
||||
mask = mask_image.convert("L")
|
||||
return mask
|
||||
|
||||
output = pipe(prompt, image, mask_image, source_points, target_points)
|
||||
output_image = PIL.Image.fromarray(output)
|
||||
# Function to convert numpy array to PIL Image
|
||||
def array_to_pil(array):
|
||||
# Ensure the array is in uint8 format
|
||||
if array.dtype != np.uint8:
|
||||
if array.max() <= 1.0:
|
||||
array = (array * 255).astype(np.uint8)
|
||||
else:
|
||||
array = array.astype(np.uint8)
|
||||
|
||||
# Handle different array shapes
|
||||
if len(array.shape) == 3:
|
||||
if array.shape[0] == 3: # If channels first
|
||||
array = array.transpose(1, 2, 0)
|
||||
return Image.fromarray(array)
|
||||
elif len(array.shape) == 4: # If batch dimension
|
||||
array = array[0]
|
||||
if array.shape[0] == 3: # If channels first
|
||||
array = array.transpose(1, 2, 0)
|
||||
return Image.fromarray(array)
|
||||
else:
|
||||
raise ValueError(f"Unexpected array shape: {array.shape}")
|
||||
|
||||
# Image and mask URLs
|
||||
image_url = 'https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png'
|
||||
mask_url = 'https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png'
|
||||
|
||||
# Load the images
|
||||
image = load_image_from_url(image_url)
|
||||
mask_image = load_image_from_url(mask_url)
|
||||
|
||||
# Resize images to a size that's compatible with the model's latent space
|
||||
image = image.resize((512, 512))
|
||||
mask_image = mask_image.resize((512, 512))
|
||||
|
||||
# Prepare the mask (keep as PIL Image)
|
||||
mask = prepare_mask(mask_image)
|
||||
|
||||
# Provide the prompt and points for drag editing
|
||||
prompt = "A cute dog"
|
||||
source_points = [[32, 32]] # Adjusted for 512x512 image
|
||||
target_points = [[64, 64]] # Adjusted for 512x512 image
|
||||
|
||||
# Generate the output image
|
||||
output_array = pipe(
|
||||
prompt=prompt,
|
||||
image=image,
|
||||
mask_image=mask,
|
||||
source_points=source_points,
|
||||
target_points=target_points
|
||||
)
|
||||
|
||||
# Convert output array to PIL Image and save
|
||||
output_image = array_to_pil(output_array)
|
||||
output_image.save("./output.png")
|
||||
print("Output image saved as './output.png'")
|
||||
|
||||
```
|
||||
|
||||
### Instaflow Pipeline
|
||||
|
||||
@@ -92,9 +92,13 @@ class CheckpointMergerPipeline(DiffusionPipeline):
|
||||
token = kwargs.pop("token", None)
|
||||
variant = kwargs.pop("variant", None)
|
||||
revision = kwargs.pop("revision", None)
|
||||
torch_dtype = kwargs.pop("torch_dtype", None)
|
||||
torch_dtype = kwargs.pop("torch_dtype", torch.float32)
|
||||
device_map = kwargs.pop("device_map", None)
|
||||
|
||||
if not isinstance(torch_dtype, torch.dtype):
|
||||
torch_dtype = torch.float32
|
||||
print(f"Passed `torch_dtype` {torch_dtype} is not a `torch.dtype`. Defaulting to `torch.float32`.")
|
||||
|
||||
alpha = kwargs.pop("alpha", 0.5)
|
||||
interp = kwargs.pop("interp", None)
|
||||
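The hunk above changes the checkpoint merger to default `torch_dtype` to `torch.float32` and to validate the passed value. A small illustrative sketch of that guard (not the library code; note that the sketch prints the offending value before overwriting it, whereas the hunk overwrites first):

```python
import torch

def resolve_torch_dtype(torch_dtype=torch.float32):
    # Fall back to float32 when a non-dtype value (e.g. the string "float16") is passed.
    if not isinstance(torch_dtype, torch.dtype):
        print(f"Passed `torch_dtype` {torch_dtype} is not a `torch.dtype`. Defaulting to `torch.float32`.")
        torch_dtype = torch.float32
    return torch_dtype

print(resolve_torch_dtype("float16"))      # falls back to torch.float32
print(resolve_torch_dtype(torch.float16))  # kept as-is
```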
|
||||
|
||||
examples/community/mixture_tiling_sdxl.py (new file, 1237 lines; diff suppressed because it is too large)
@@ -87,7 +87,7 @@ def calculate_shift(
|
||||
base_seq_len: int = 256,
|
||||
max_seq_len: int = 4096,
|
||||
base_shift: float = 0.5,
|
||||
max_shift: float = 1.16,
|
||||
max_shift: float = 1.15,
|
||||
):
|
||||
m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
|
||||
b = base_shift - m * base_seq_len
|
||||
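The hunks in this group all lower the default `max_shift` in `calculate_shift` from 1.16 to 1.15; the shift is a linear interpolation between `base_shift` at `base_seq_len` and `max_shift` at `max_seq_len`. A small numerical check (the helper below is a sketch built from the hunk, not the library function):

```python
def calculate_shift_sketch(image_seq_len, base_seq_len=256, max_seq_len=4096, base_shift=0.5, max_shift=1.15):
    # mu(seq_len) = m * seq_len + b, passing through (base_seq_len, base_shift) and (max_seq_len, max_shift).
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    return image_seq_len * m + b

# For a 1024-token image sequence the change is tiny:
print(calculate_shift_sketch(1024, max_shift=1.15))  # ~0.630
print(calculate_shift_sketch(1024, max_shift=1.16))  # ~0.632
```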
@@ -878,7 +878,7 @@ class FluxDifferentialImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin):
|
||||
self.scheduler.config.get("base_image_seq_len", 256),
|
||||
self.scheduler.config.get("max_image_seq_len", 4096),
|
||||
self.scheduler.config.get("base_shift", 0.5),
|
||||
self.scheduler.config.get("max_shift", 1.16),
|
||||
self.scheduler.config.get("max_shift", 1.15),
|
||||
)
|
||||
timesteps, num_inference_steps = retrieve_timesteps(
|
||||
self.scheduler,
|
||||
|
||||
@@ -94,7 +94,7 @@ def calculate_shift(
|
||||
base_seq_len: int = 256,
|
||||
max_seq_len: int = 4096,
|
||||
base_shift: float = 0.5,
|
||||
max_shift: float = 1.16,
|
||||
max_shift: float = 1.15,
|
||||
):
|
||||
m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
|
||||
b = base_shift - m * base_seq_len
|
||||
@@ -823,7 +823,7 @@ class RFInversionFluxPipeline(
|
||||
self.scheduler.config.get("base_image_seq_len", 256),
|
||||
self.scheduler.config.get("max_image_seq_len", 4096),
|
||||
self.scheduler.config.get("base_shift", 0.5),
|
||||
self.scheduler.config.get("max_shift", 1.16),
|
||||
self.scheduler.config.get("max_shift", 1.15),
|
||||
)
|
||||
timesteps, num_inference_steps = retrieve_timesteps(
|
||||
self.scheduler,
|
||||
@@ -993,7 +993,7 @@ class RFInversionFluxPipeline(
|
||||
self.scheduler.config.get("base_image_seq_len", 256),
|
||||
self.scheduler.config.get("max_image_seq_len", 4096),
|
||||
self.scheduler.config.get("base_shift", 0.5),
|
||||
self.scheduler.config.get("max_shift", 1.16),
|
||||
self.scheduler.config.get("max_shift", 1.15),
|
||||
)
|
||||
timesteps, num_inversion_steps = retrieve_timesteps(
|
||||
self.scheduler,
|
||||
|
||||
@@ -91,7 +91,7 @@ def calculate_shift(
|
||||
base_seq_len: int = 256,
|
||||
max_seq_len: int = 4096,
|
||||
base_shift: float = 0.5,
|
||||
max_shift: float = 1.16,
|
||||
max_shift: float = 1.15,
|
||||
):
|
||||
m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
|
||||
b = base_shift - m * base_seq_len
|
||||
@@ -1041,7 +1041,7 @@ class FluxSemanticGuidancePipeline(
|
||||
self.scheduler.config.get("base_image_seq_len", 256),
|
||||
self.scheduler.config.get("max_image_seq_len", 4096),
|
||||
self.scheduler.config.get("base_shift", 0.5),
|
||||
self.scheduler.config.get("max_shift", 1.16),
|
||||
self.scheduler.config.get("max_shift", 1.15),
|
||||
)
|
||||
timesteps, num_inference_steps = retrieve_timesteps(
|
||||
self.scheduler,
|
||||
|
||||
@@ -70,7 +70,7 @@ def calculate_shift(
|
||||
base_seq_len: int = 256,
|
||||
max_seq_len: int = 4096,
|
||||
base_shift: float = 0.5,
|
||||
max_shift: float = 1.16,
|
||||
max_shift: float = 1.15,
|
||||
):
|
||||
m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
|
||||
b = base_shift - m * base_seq_len
|
||||
@@ -759,7 +759,7 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi
|
||||
self.scheduler.config.get("base_image_seq_len", 256),
|
||||
self.scheduler.config.get("max_image_seq_len", 4096),
|
||||
self.scheduler.config.get("base_shift", 0.5),
|
||||
self.scheduler.config.get("max_shift", 1.16),
|
||||
self.scheduler.config.get("max_shift", 1.15),
|
||||
)
|
||||
timesteps, num_inference_steps = retrieve_timesteps(
|
||||
self.scheduler,
|
||||
|
||||
@@ -1143,7 +1143,7 @@ def main(args):
|
||||
if global_step >= args.max_train_steps:
|
||||
break
|
||||
|
||||
# Create the pipeline using using the trained modules and save it.
|
||||
# Create the pipeline using the trained modules and save it.
|
||||
accelerator.wait_for_everyone()
|
||||
if accelerator.is_main_process:
|
||||
controlnet = unwrap_model(controlnet)
|
||||
|
||||
examples/dreambooth/README_lumina2.md (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
# DreamBooth training example for Lumina2
|
||||
|
||||
[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text2image models like stable diffusion given just a few (3~5) images of a subject.
|
||||
|
||||
The `train_dreambooth_lora_lumina2.py` script shows how to implement the training procedure with [LoRA](https://huggingface.co/docs/peft/conceptual_guides/adapter#low-rank-adaptation-lora) and adapt it for [Lumina2](https://huggingface.co/docs/diffusers/main/en/api/pipelines/lumina2).
|
||||
|
||||
|
||||
This will also allow us to push the trained model parameters to the Hugging Face Hub platform.
|
||||
|
||||
## Running locally with PyTorch
|
||||
|
||||
### Installing the dependencies
|
||||
|
||||
Before running the scripts, make sure to install the library's training dependencies:
|
||||
|
||||
**Important**
|
||||
|
||||
To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/huggingface/diffusers
|
||||
cd diffusers
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
Then cd into the `examples/dreambooth` folder and run
|
||||
```bash
|
||||
pip install -r requirements_sana.txt
|
||||
```
|
||||
|
||||
And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
Or for a default accelerate configuration without answering questions about your environment
|
||||
|
||||
```bash
|
||||
accelerate config default
|
||||
```
|
||||
|
||||
Or if your environment doesn't support an interactive shell (e.g., a notebook)
|
||||
|
||||
```python
|
||||
from accelerate.utils import write_basic_config
|
||||
write_basic_config()
|
||||
```
|
||||
|
||||
When running `accelerate config`, setting torch compile mode to True can give dramatic speedups.
Note also that we use the PEFT library as the backend for LoRA training, so make sure to have `peft>=0.14.0` installed in your environment.
|
||||
|
||||
|
||||
### Dog toy example
|
||||
|
||||
Now let's get our dataset. For this example we will use some dog images: https://huggingface.co/datasets/diffusers/dog-example.
|
||||
|
||||
Let's first download it locally:
|
||||
|
||||
```python
|
||||
from huggingface_hub import snapshot_download
|
||||
|
||||
local_dir = "./dog"
|
||||
snapshot_download(
|
||||
"diffusers/dog-example",
|
||||
local_dir=local_dir, repo_type="dataset",
|
||||
ignore_patterns=".gitattributes",
|
||||
)
|
||||
```
|
||||
|
||||
This will also allow us to push the trained LoRA parameters to the Hugging Face Hub platform.
|
||||
|
||||
Now, we can launch training using:
|
||||
|
||||
```bash
|
||||
export MODEL_NAME="Alpha-VLLM/Lumina-Image-2.0"
|
||||
export INSTANCE_DIR="dog"
|
||||
export OUTPUT_DIR="trained-lumina2-lora"
|
||||
|
||||
accelerate launch train_dreambooth_lora_lumina2.py \
|
||||
--pretrained_model_name_or_path=$MODEL_NAME \
|
||||
--instance_data_dir=$INSTANCE_DIR \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--mixed_precision="bf16" \
|
||||
--instance_prompt="a photo of sks dog" \
|
||||
--resolution=1024 \
|
||||
--train_batch_size=1 \
|
||||
--gradient_accumulation_steps=4 \
|
||||
--use_8bit_adam \
|
||||
--learning_rate=1e-4 \
|
||||
--report_to="wandb" \
|
||||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--max_train_steps=500 \
|
||||
--validation_prompt="A photo of sks dog in a bucket" \
|
||||
--validation_epochs=25 \
|
||||
--seed="0" \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
To use `push_to_hub`, make sure you're logged into your Hugging Face account:
|
||||
|
||||
```bash
|
||||
huggingface-cli login
|
||||
```
|
||||
|
||||
To better track our training experiments, we're using the following flags in the command above:
|
||||
|
||||
* `report_to="wandb` will ensure the training runs are tracked on [Weights and Biases](https://wandb.ai/site). To use it, be sure to install `wandb` with `pip install wandb`. Don't forget to call `wandb login <your_api_key>` before training if you haven't done it before.
|
||||
* `validation_prompt` and `validation_epochs` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.
|
||||
|
||||
## Notes
|
||||
|
||||
Additionally, we welcome you to explore the following CLI arguments:
|
||||
|
||||
* `--lora_layers`: The transformer modules to apply LoRA training on. Please specify the layers as a comma-separated string, e.g. `"to_k,to_q,to_v"` will result in LoRA training of the attention layers only.
* `--system_prompt`: A custom system prompt to provide additional personality to the model.
* `--max_sequence_length`: Maximum sequence length to use for text embeddings.


We provide several options for optimizing memory usage:

* `--offload`: When enabled, we will offload the text encoder and VAE to the CPU when they are not used.
* `--cache_latents`: When enabled, we will pre-compute the latents from the input images with the VAE and remove the VAE from memory once done.
* `--use_8bit_adam`: When enabled, we will use the 8-bit version of AdamW provided by the `bitsandbytes` library.
|
||||
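After training, the LoRA weights saved in `OUTPUT_DIR` can be loaded back into the base model for inference. A minimal, hedged sketch (the dtype and step count are illustrative, and it assumes the Lumina2 pipeline exposes the standard `load_lora_weights` API):

```python
import torch
from diffusers import DiffusionPipeline

# Base model and output directory from the training command above.
pipe = DiffusionPipeline.from_pretrained("Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16)
pipe.load_lora_weights("trained-lumina2-lora")  # or your Hub repo id if you trained with --push_to_hub
pipe.to("cuda")

image = pipe(prompt="A photo of sks dog in a bucket", num_inference_steps=30).images[0]
image.save("sks_dog.png")
```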
|
||||
Refer to the [official documentation](https://huggingface.co/docs/diffusers/main/en/api/pipelines/lumina2) of the `LuminaPipeline` to know more about the model.
|
||||
examples/dreambooth/test_dreambooth_lora_lumina2.py (new file, 206 lines)
@@ -0,0 +1,206 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2024 HuggingFace Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
import safetensors
|
||||
|
||||
|
||||
sys.path.append("..")
|
||||
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
|
||||
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
logger = logging.getLogger()
|
||||
stream_handler = logging.StreamHandler(sys.stdout)
|
||||
logger.addHandler(stream_handler)
|
||||
|
||||
|
||||
class DreamBoothLoRAlumina2(ExamplesTestsAccelerate):
|
||||
instance_data_dir = "docs/source/en/imgs"
|
||||
pretrained_model_name_or_path = "hf-internal-testing/tiny-lumina2-pipe"
|
||||
script_path = "examples/dreambooth/train_dreambooth_lora_lumina2.py"
|
||||
transformer_layer_type = "layers.0.attn.to_k"
|
||||
|
||||
def test_dreambooth_lora_lumina2(self):
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
test_args = f"""
|
||||
{self.script_path}
|
||||
--pretrained_model_name_or_path {self.pretrained_model_name_or_path}
|
||||
--instance_data_dir {self.instance_data_dir}
|
||||
--resolution 32
|
||||
--train_batch_size 1
|
||||
--gradient_accumulation_steps 1
|
||||
--max_train_steps 2
|
||||
--learning_rate 5.0e-04
|
||||
--scale_lr
|
||||
--lr_scheduler constant
|
||||
--lr_warmup_steps 0
|
||||
--output_dir {tmpdir}
|
||||
--max_sequence_length 16
|
||||
""".split()
|
||||
|
||||
test_args.extend(["--instance_prompt", ""])
|
||||
run_command(self._launch_args + test_args)
|
||||
# save_pretrained smoke test
|
||||
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
|
||||
|
||||
# make sure the state_dict has the correct naming in the parameters.
|
||||
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
|
||||
is_lora = all("lora" in k for k in lora_state_dict.keys())
|
||||
self.assertTrue(is_lora)
|
||||
|
||||
# when not training the text encoder, all the parameters in the state dict should start
|
||||
# with `"transformer"` in their names.
|
||||
starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys())
|
||||
self.assertTrue(starts_with_transformer)
|
||||
|
||||
def test_dreambooth_lora_latent_caching(self):
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
test_args = f"""
|
||||
{self.script_path}
|
||||
--pretrained_model_name_or_path {self.pretrained_model_name_or_path}
|
||||
--instance_data_dir {self.instance_data_dir}
|
||||
--resolution 32
|
||||
--train_batch_size 1
|
||||
--gradient_accumulation_steps 1
|
||||
--max_train_steps 2
|
||||
--cache_latents
|
||||
--learning_rate 5.0e-04
|
||||
--scale_lr
|
||||
--lr_scheduler constant
|
||||
--lr_warmup_steps 0
|
||||
--output_dir {tmpdir}
|
||||
--max_sequence_length 16
|
||||
""".split()
|
||||
|
||||
test_args.extend(["--instance_prompt", ""])
|
||||
run_command(self._launch_args + test_args)
|
||||
# save_pretrained smoke test
|
||||
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
|
||||
|
||||
# make sure the state_dict has the correct naming in the parameters.
|
||||
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
|
||||
is_lora = all("lora" in k for k in lora_state_dict.keys())
|
||||
self.assertTrue(is_lora)
|
||||
|
||||
# when not training the text encoder, all the parameters in the state dict should start
|
||||
# with `"transformer"` in their names.
|
||||
starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys())
|
||||
self.assertTrue(starts_with_transformer)
|
||||
|
||||
def test_dreambooth_lora_layers(self):
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
test_args = f"""
|
||||
{self.script_path}
|
||||
--pretrained_model_name_or_path {self.pretrained_model_name_or_path}
|
||||
--instance_data_dir {self.instance_data_dir}
|
||||
--resolution 32
|
||||
--train_batch_size 1
|
||||
--gradient_accumulation_steps 1
|
||||
--max_train_steps 2
|
||||
--cache_latents
|
||||
--learning_rate 5.0e-04
|
||||
--scale_lr
|
||||
--lora_layers {self.transformer_layer_type}
|
||||
--lr_scheduler constant
|
||||
--lr_warmup_steps 0
|
||||
--output_dir {tmpdir}
|
||||
--max_sequence_length 16
|
||||
""".split()
|
||||
|
||||
test_args.extend(["--instance_prompt", ""])
|
||||
run_command(self._launch_args + test_args)
|
||||
# save_pretrained smoke test
|
||||
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
|
||||
|
||||
# make sure the state_dict has the correct naming in the parameters.
|
||||
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
|
||||
is_lora = all("lora" in k for k in lora_state_dict.keys())
|
||||
self.assertTrue(is_lora)
|
||||
|
||||
# when not training the text encoder, all the parameters in the state dict should start
|
||||
# with `"transformer"` in their names. In this test, we only params of
|
||||
# `self.transformer_layer_type` should be in the state dict.
|
||||
starts_with_transformer = all(self.transformer_layer_type in key for key in lora_state_dict)
|
||||
self.assertTrue(starts_with_transformer)
|
||||
|
||||
def test_dreambooth_lora_lumina2_checkpointing_checkpoints_total_limit(self):
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
test_args = f"""
|
||||
{self.script_path}
|
||||
--pretrained_model_name_or_path={self.pretrained_model_name_or_path}
|
||||
--instance_data_dir={self.instance_data_dir}
|
||||
--output_dir={tmpdir}
|
||||
--resolution=32
|
||||
--train_batch_size=1
|
||||
--gradient_accumulation_steps=1
|
||||
--max_train_steps=6
|
||||
--checkpoints_total_limit=2
|
||||
--checkpointing_steps=2
|
||||
--max_sequence_length 16
|
||||
""".split()
|
||||
|
||||
test_args.extend(["--instance_prompt", ""])
|
||||
run_command(self._launch_args + test_args)
|
||||
|
||||
self.assertEqual(
|
||||
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
|
||||
{"checkpoint-4", "checkpoint-6"},
|
||||
)
|
||||
|
||||
def test_dreambooth_lora_lumina2_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
test_args = f"""
|
||||
{self.script_path}
|
||||
--pretrained_model_name_or_path={self.pretrained_model_name_or_path}
|
||||
--instance_data_dir={self.instance_data_dir}
|
||||
--output_dir={tmpdir}
|
||||
--resolution=32
|
||||
--train_batch_size=1
|
||||
--gradient_accumulation_steps=1
|
||||
--max_train_steps=4
|
||||
--checkpointing_steps=2
|
||||
--max_sequence_length 166
|
||||
""".split()
|
||||
|
||||
test_args.extend(["--instance_prompt", ""])
|
||||
run_command(self._launch_args + test_args)
|
||||
|
||||
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"})
|
||||
|
||||
resume_run_args = f"""
|
||||
{self.script_path}
|
||||
--pretrained_model_name_or_path={self.pretrained_model_name_or_path}
|
||||
--instance_data_dir={self.instance_data_dir}
|
||||
--output_dir={tmpdir}
|
||||
--resolution=32
|
||||
--train_batch_size=1
|
||||
--gradient_accumulation_steps=1
|
||||
--max_train_steps=8
|
||||
--checkpointing_steps=2
|
||||
--resume_from_checkpoint=checkpoint-4
|
||||
--checkpoints_total_limit=2
|
||||
--max_sequence_length 16
|
||||
""".split()
|
||||
|
||||
resume_run_args.extend(["--instance_prompt", ""])
|
||||
run_command(self._launch_args + resume_run_args)
|
||||
|
||||
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"})
|
||||
examples/dreambooth/train_dreambooth_lora_lumina2.py (new file, 1563 lines; diff suppressed because it is too large)
@@ -995,7 +995,8 @@ def main(args):
|
||||
if args.enable_npu_flash_attention:
|
||||
if is_torch_npu_available():
|
||||
logger.info("npu flash attention enabled.")
|
||||
transformer.enable_npu_flash_attention()
|
||||
for block in transformer.transformer_blocks:
|
||||
block.attn2.set_use_npu_flash_attention(True)
|
||||
else:
|
||||
raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu device ")
|
||||
|
||||
|
||||
@@ -203,7 +203,7 @@ def log_validation(
|
||||
|
||||
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
|
||||
|
||||
pipeline = pipeline.to(accelerator.device, dtype=torch_dtype)
|
||||
pipeline = pipeline.to(accelerator.device)
|
||||
pipeline.set_progress_bar_config(disable=True)
|
||||
|
||||
# run inference
|
||||
@@ -213,7 +213,7 @@ def log_validation(
|
||||
if torch.backends.mps.is_available() or "playground" in args.pretrained_model_name_or_path:
|
||||
autocast_ctx = nullcontext()
|
||||
else:
|
||||
autocast_ctx = torch.autocast(accelerator.device.type)
|
||||
autocast_ctx = torch.autocast(accelerator.device.type) if not is_final_validation else nullcontext()
|
||||
|
||||
with autocast_ctx:
|
||||
images = [pipeline(**pipeline_args, generator=generator).images[0] for _ in range(args.num_validation_images)]
|
||||
|
||||
@@ -695,7 +695,7 @@ def main():
|
||||
)
|
||||
# We need to ensure that the original and the edited images undergo the same
|
||||
# augmentation transforms.
|
||||
images = np.concatenate([original_images, edited_images])
|
||||
images = np.stack([original_images, edited_images])
|
||||
images = torch.tensor(images)
|
||||
images = 2 * (images / 255) - 1
|
||||
return train_transforms(images)
|
||||
@@ -706,7 +706,7 @@ def main():
|
||||
# Since the original and edited images were concatenated before
|
||||
# applying the transformations, we need to separate them and reshape
|
||||
# them accordingly.
|
||||
original_images, edited_images = preprocessed_images.chunk(2)
|
||||
original_images, edited_images = preprocessed_images
|
||||
original_images = original_images.reshape(-1, 3, args.resolution, args.resolution)
|
||||
edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution)
|
||||
|
||||
|
||||
@@ -766,7 +766,7 @@ def main():
|
||||
)
|
||||
# We need to ensure that the original and the edited images undergo the same
|
||||
# augmentation transforms.
|
||||
images = np.concatenate([original_images, edited_images])
|
||||
images = np.stack([original_images, edited_images])
|
||||
images = torch.tensor(images)
|
||||
images = 2 * (images / 255) - 1
|
||||
return train_transforms(images)
|
||||
@@ -906,7 +906,7 @@ def main():
|
||||
# Since the original and edited images were concatenated before
|
||||
# applying the transformations, we need to separate them and reshape
|
||||
# them accordingly.
|
||||
original_images, edited_images = preprocessed_images.chunk(2)
|
||||
original_images, edited_images = preprocessed_images
|
||||
original_images = original_images.reshape(-1, 3, args.resolution, args.resolution)
|
||||
edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution)
|
||||
|
||||
|
||||
@@ -82,31 +82,11 @@ pipeline = EasyPipelineForInpainting.from_huggingface(
|
||||
## Search Civitai and Huggingface
|
||||
|
||||
```python
|
||||
from pipeline_easy import (
|
||||
search_huggingface,
|
||||
search_civitai,
|
||||
)
|
||||
|
||||
# Search Lora
|
||||
Lora = search_civitai(
|
||||
"Keyword_to_search_Lora",
|
||||
model_type="LORA",
|
||||
base_model = "SD 1.5",
|
||||
download=True,
|
||||
)
|
||||
# Load Lora into the pipeline.
|
||||
pipeline.load_lora_weights(Lora)
|
||||
pipeline.auto_load_lora_weights("Detail Tweaker")
|
||||
|
||||
|
||||
# Search TextualInversion
|
||||
TextualInversion = search_civitai(
|
||||
"EasyNegative",
|
||||
model_type="TextualInversion",
|
||||
base_model = "SD 1.5",
|
||||
download=True
|
||||
)
|
||||
# Load TextualInversion into the pipeline.
|
||||
pipeline.load_textual_inversion(TextualInversion, token="EasyNegative")
|
||||
pipeline.auto_load_textual_inversion("EasyNegative", token="EasyNegative")
|
||||
```
|
||||
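For the Hugging Face Hub side, `search_huggingface` (imported above) follows the same pattern as `search_civitai`. A hedged sketch based on the function signature shown later in this diff; the keyword and keyword arguments are illustrative:

```python
from pipeline_easy import search_huggingface

# Search the Hub for a Diffusers-format checkpoint without downloading it
# (see the `search_huggingface` definition further down in this diff for the accepted kwargs).
result = search_huggingface("stable diffusion", checkpoint_format="diffusers", download=False)
print(result)
```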
|
||||
### Search Civitai
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2024 suzukimain
|
||||
# Copyright 2025 suzukimain
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -15,11 +15,13 @@
|
||||
|
||||
import os
|
||||
import re
|
||||
import types
|
||||
from collections import OrderedDict
|
||||
from dataclasses import asdict, dataclass
|
||||
from typing import Union
|
||||
from dataclasses import asdict, dataclass, field
|
||||
from typing import Dict, List, Optional, Union
|
||||
|
||||
import requests
|
||||
import torch
|
||||
from huggingface_hub import hf_api, hf_hub_download
|
||||
from huggingface_hub.file_download import http_get
|
||||
from huggingface_hub.utils import validate_hf_hub_args
|
||||
@@ -30,6 +32,7 @@ from diffusers.loaders.single_file_utils import (
|
||||
infer_diffusers_model_type,
|
||||
load_single_file_checkpoint,
|
||||
)
|
||||
from diffusers.pipelines.animatediff import AnimateDiffPipeline, AnimateDiffSDXLPipeline
|
||||
from diffusers.pipelines.auto_pipeline import (
|
||||
AutoPipelineForImage2Image,
|
||||
AutoPipelineForInpainting,
|
||||
@@ -39,13 +42,18 @@ from diffusers.pipelines.controlnet import (
|
||||
StableDiffusionControlNetImg2ImgPipeline,
|
||||
StableDiffusionControlNetInpaintPipeline,
|
||||
StableDiffusionControlNetPipeline,
|
||||
StableDiffusionXLControlNetImg2ImgPipeline,
|
||||
StableDiffusionXLControlNetPipeline,
|
||||
)
|
||||
from diffusers.pipelines.flux import FluxImg2ImgPipeline, FluxPipeline
|
||||
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
||||
from diffusers.pipelines.stable_diffusion import (
|
||||
StableDiffusionImg2ImgPipeline,
|
||||
StableDiffusionInpaintPipeline,
|
||||
StableDiffusionPipeline,
|
||||
StableDiffusionUpscalePipeline,
|
||||
)
|
||||
from diffusers.pipelines.stable_diffusion_3 import StableDiffusion3Img2ImgPipeline, StableDiffusion3Pipeline
|
||||
from diffusers.pipelines.stable_diffusion_xl import (
|
||||
StableDiffusionXLImg2ImgPipeline,
|
||||
StableDiffusionXLInpaintPipeline,
|
||||
@@ -59,46 +67,133 @@ logger = logging.get_logger(__name__)
|
||||
|
||||
SINGLE_FILE_CHECKPOINT_TEXT2IMAGE_PIPELINE_MAPPING = OrderedDict(
|
||||
[
|
||||
("xl_base", StableDiffusionXLPipeline),
|
||||
("xl_refiner", StableDiffusionXLPipeline),
|
||||
("xl_inpaint", None),
|
||||
("playground-v2-5", StableDiffusionXLPipeline),
|
||||
("upscale", None),
|
||||
("animatediff_rgb", AnimateDiffPipeline),
|
||||
("animatediff_scribble", AnimateDiffPipeline),
|
||||
("animatediff_sdxl_beta", AnimateDiffSDXLPipeline),
|
||||
("animatediff_v1", AnimateDiffPipeline),
|
||||
("animatediff_v2", AnimateDiffPipeline),
|
||||
("animatediff_v3", AnimateDiffPipeline),
|
||||
("autoencoder-dc-f128c512", None),
|
||||
("autoencoder-dc-f32c32", None),
|
||||
("autoencoder-dc-f32c32-sana", None),
|
||||
("autoencoder-dc-f64c128", None),
|
||||
("controlnet", StableDiffusionControlNetPipeline),
|
||||
("controlnet_xl", StableDiffusionXLControlNetPipeline),
|
||||
("controlnet_xl_large", StableDiffusionXLControlNetPipeline),
|
||||
("controlnet_xl_mid", StableDiffusionXLControlNetPipeline),
|
||||
("controlnet_xl_small", StableDiffusionXLControlNetPipeline),
|
||||
("flux-depth", FluxPipeline),
|
||||
("flux-dev", FluxPipeline),
|
||||
("flux-fill", FluxPipeline),
|
||||
("flux-schnell", FluxPipeline),
|
||||
("hunyuan-video", None),
|
||||
("inpainting", None),
|
||||
("inpainting_v2", None),
|
||||
("controlnet", StableDiffusionControlNetPipeline),
|
||||
("v2", StableDiffusionPipeline),
|
||||
("ltx-video", None),
|
||||
("ltx-video-0.9.1", None),
|
||||
("mochi-1-preview", None),
|
||||
("playground-v2-5", StableDiffusionXLPipeline),
|
||||
("sd3", StableDiffusion3Pipeline),
|
||||
("sd35_large", StableDiffusion3Pipeline),
|
||||
("sd35_medium", StableDiffusion3Pipeline),
|
||||
("stable_cascade_stage_b", None),
|
||||
("stable_cascade_stage_b_lite", None),
|
||||
("stable_cascade_stage_c", None),
|
||||
("stable_cascade_stage_c_lite", None),
|
||||
("upscale", StableDiffusionUpscalePipeline),
|
||||
("v1", StableDiffusionPipeline),
|
||||
("v2", StableDiffusionPipeline),
|
||||
("xl_base", StableDiffusionXLPipeline),
|
||||
("xl_inpaint", None),
|
||||
("xl_refiner", StableDiffusionXLPipeline),
|
||||
]
|
||||
)
|
||||
|
||||
SINGLE_FILE_CHECKPOINT_IMAGE2IMAGE_PIPELINE_MAPPING = OrderedDict(
|
||||
[
|
||||
("xl_base", StableDiffusionXLImg2ImgPipeline),
|
||||
("xl_refiner", StableDiffusionXLImg2ImgPipeline),
|
||||
("xl_inpaint", None),
|
||||
("playground-v2-5", StableDiffusionXLImg2ImgPipeline),
|
||||
("upscale", None),
|
||||
("animatediff_rgb", AnimateDiffPipeline),
|
||||
("animatediff_scribble", AnimateDiffPipeline),
|
||||
("animatediff_sdxl_beta", AnimateDiffSDXLPipeline),
|
||||
("animatediff_v1", AnimateDiffPipeline),
|
||||
("animatediff_v2", AnimateDiffPipeline),
|
||||
("animatediff_v3", AnimateDiffPipeline),
|
||||
("autoencoder-dc-f128c512", None),
|
||||
("autoencoder-dc-f32c32", None),
|
||||
("autoencoder-dc-f32c32-sana", None),
|
||||
("autoencoder-dc-f64c128", None),
|
||||
("controlnet", StableDiffusionControlNetImg2ImgPipeline),
|
||||
("controlnet_xl", StableDiffusionXLControlNetImg2ImgPipeline),
|
||||
("controlnet_xl_large", StableDiffusionXLControlNetImg2ImgPipeline),
|
||||
("controlnet_xl_mid", StableDiffusionXLControlNetImg2ImgPipeline),
|
||||
("controlnet_xl_small", StableDiffusionXLControlNetImg2ImgPipeline),
|
||||
("flux-depth", FluxImg2ImgPipeline),
|
||||
("flux-dev", FluxImg2ImgPipeline),
|
||||
("flux-fill", FluxImg2ImgPipeline),
|
||||
("flux-schnell", FluxImg2ImgPipeline),
|
||||
("hunyuan-video", None),
|
||||
("inpainting", None),
|
||||
("inpainting_v2", None),
|
||||
("controlnet", StableDiffusionControlNetImg2ImgPipeline),
|
||||
("v2", StableDiffusionImg2ImgPipeline),
|
||||
("ltx-video", None),
|
||||
("ltx-video-0.9.1", None),
|
||||
("mochi-1-preview", None),
|
||||
("playground-v2-5", StableDiffusionXLImg2ImgPipeline),
|
||||
("sd3", StableDiffusion3Img2ImgPipeline),
|
||||
("sd35_large", StableDiffusion3Img2ImgPipeline),
|
||||
("sd35_medium", StableDiffusion3Img2ImgPipeline),
|
||||
("stable_cascade_stage_b", None),
|
||||
("stable_cascade_stage_b_lite", None),
|
||||
("stable_cascade_stage_c", None),
|
||||
("stable_cascade_stage_c_lite", None),
|
||||
("upscale", StableDiffusionUpscalePipeline),
|
||||
("v1", StableDiffusionImg2ImgPipeline),
|
||||
("v2", StableDiffusionImg2ImgPipeline),
|
||||
("xl_base", StableDiffusionXLImg2ImgPipeline),
|
||||
("xl_inpaint", None),
|
||||
("xl_refiner", StableDiffusionXLImg2ImgPipeline),
|
||||
]
|
||||
)
|
||||
|
||||
SINGLE_FILE_CHECKPOINT_INPAINT_PIPELINE_MAPPING = OrderedDict(
|
||||
[
|
||||
("xl_base", None),
|
||||
("xl_refiner", None),
|
||||
("xl_inpaint", StableDiffusionXLInpaintPipeline),
|
||||
("playground-v2-5", None),
|
||||
("upscale", None),
|
||||
("animatediff_rgb", None),
|
||||
("animatediff_scribble", None),
|
||||
("animatediff_sdxl_beta", None),
|
||||
("animatediff_v1", None),
|
||||
("animatediff_v2", None),
|
||||
("animatediff_v3", None),
|
||||
("autoencoder-dc-f128c512", None),
|
||||
("autoencoder-dc-f32c32", None),
|
||||
("autoencoder-dc-f32c32-sana", None),
|
||||
("autoencoder-dc-f64c128", None),
|
||||
("controlnet", StableDiffusionControlNetInpaintPipeline),
|
||||
("controlnet_xl", None),
|
||||
("controlnet_xl_large", None),
|
||||
("controlnet_xl_mid", None),
|
||||
("controlnet_xl_small", None),
|
||||
("flux-depth", None),
|
||||
("flux-dev", None),
|
||||
("flux-fill", None),
|
||||
("flux-schnell", None),
|
||||
("hunyuan-video", None),
|
||||
("inpainting", StableDiffusionInpaintPipeline),
|
||||
("inpainting_v2", StableDiffusionInpaintPipeline),
|
||||
("controlnet", StableDiffusionControlNetInpaintPipeline),
|
||||
("v2", None),
|
||||
("ltx-video", None),
|
||||
("ltx-video-0.9.1", None),
|
||||
("mochi-1-preview", None),
|
||||
("playground-v2-5", None),
|
||||
("sd3", None),
|
||||
("sd35_large", None),
|
||||
("sd35_medium", None),
|
||||
("stable_cascade_stage_b", None),
|
||||
("stable_cascade_stage_b_lite", None),
|
||||
("stable_cascade_stage_c", None),
|
||||
("stable_cascade_stage_c_lite", None),
|
||||
("upscale", StableDiffusionUpscalePipeline),
|
||||
("v1", None),
|
||||
("v2", None),
|
||||
("xl_base", None),
|
||||
("xl_inpaint", StableDiffusionXLInpaintPipeline),
|
||||
("xl_refiner", None),
|
||||
]
|
||||
)
|
||||
|
||||
@@ -116,14 +211,33 @@ CONFIG_FILE_LIST = [
|
||||
"diffusion_pytorch_model.non_ema.safetensors",
|
||||
]
|
||||
|
||||
DIFFUSERS_CONFIG_DIR = ["safety_checker", "unet", "vae", "text_encoder", "text_encoder_2"]
|
||||
|
||||
INPAINT_PIPELINE_KEYS = [
|
||||
"xl_inpaint",
|
||||
"inpainting",
|
||||
"inpainting_v2",
|
||||
DIFFUSERS_CONFIG_DIR = [
|
||||
"safety_checker",
|
||||
"unet",
|
||||
"vae",
|
||||
"text_encoder",
|
||||
"text_encoder_2",
|
||||
]
|
||||
|
||||
TOKENIZER_SHAPE_MAP = {
|
||||
768: [
|
||||
"SD 1.4",
|
||||
"SD 1.5",
|
||||
"SD 1.5 LCM",
|
||||
"SDXL 0.9",
|
||||
"SDXL 1.0",
|
||||
"SDXL 1.0 LCM",
|
||||
"SDXL Distilled",
|
||||
"SDXL Turbo",
|
||||
"SDXL Lightning",
|
||||
"PixArt a",
|
||||
"Playground v2",
|
||||
"Pony",
|
||||
],
|
||||
1024: ["SD 2.0", "SD 2.0 768", "SD 2.1", "SD 2.1 768", "SD 2.1 Unclip"],
|
||||
}
|
||||
|
||||
|
||||
EXTENSION = [".safetensors", ".ckpt", ".bin"]
|
||||
|
||||
CACHE_HOME = os.path.expanduser("~/.cache")
|
||||
@@ -162,12 +276,28 @@ class ModelStatus:
|
||||
The name of the model file.
|
||||
local (`bool`):
|
||||
Whether the model exists locally
|
||||
site_url (`str`):
|
||||
The URL of the site where the model is hosted.
|
||||
"""
|
||||
|
||||
search_word: str = ""
|
||||
download_url: str = ""
|
||||
file_name: str = ""
|
||||
local: bool = False
|
||||
site_url: str = ""
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExtraStatus:
|
||||
r"""
|
||||
Data class for storing extra status information.
|
||||
|
||||
Attributes:
|
||||
trained_words (`str`):
|
||||
The words used to trigger the model
|
||||
"""
|
||||
|
||||
trained_words: Union[List[str], None] = None
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -191,8 +321,9 @@ class SearchResult:
|
||||
model_path: str = ""
|
||||
loading_method: Union[str, None] = None
|
||||
checkpoint_format: Union[str, None] = None
|
||||
repo_status: RepoStatus = RepoStatus()
|
||||
model_status: ModelStatus = ModelStatus()
|
||||
repo_status: RepoStatus = field(default_factory=RepoStatus)
|
||||
model_status: ModelStatus = field(default_factory=ModelStatus)
|
||||
extra_status: ExtraStatus = field(default_factory=ExtraStatus)
|
||||
|
||||
|
||||
@validate_hf_hub_args
|
||||
@@ -385,6 +516,7 @@ def file_downloader(
|
||||
proxies = kwargs.pop("proxies", None)
|
||||
force_download = kwargs.pop("force_download", False)
|
||||
displayed_filename = kwargs.pop("displayed_filename", None)
|
||||
|
||||
# Default mode for file writing and initial file size
|
||||
mode = "wb"
|
||||
file_size = 0
|
||||
@@ -396,7 +528,7 @@ def file_downloader(
|
||||
if os.path.exists(save_path):
|
||||
if not force_download:
|
||||
# If the file exists and force_download is False, skip the download
|
||||
logger.warning(f"File already exists: {save_path}, skipping download.")
|
||||
logger.info(f"File already exists: {save_path}, skipping download.")
|
||||
return None
|
||||
elif resume:
|
||||
# If resuming, set mode to append binary and get current file size
|
||||
@@ -457,10 +589,18 @@ def search_huggingface(search_word: str, **kwargs) -> Union[str, SearchResult, N
|
||||
gated = kwargs.pop("gated", False)
|
||||
skip_error = kwargs.pop("skip_error", False)
|
||||
|
||||
file_list = []
|
||||
hf_repo_info = {}
|
||||
hf_security_info = {}
|
||||
model_path = ""
|
||||
repo_id, file_name = "", ""
|
||||
diffusers_model_exists = False
|
||||
|
||||
# Get the type and loading method for the keyword
|
||||
search_word_status = get_keyword_types(search_word)
|
||||
|
||||
if search_word_status["type"]["hf_repo"]:
|
||||
hf_repo_info = hf_api.model_info(repo_id=search_word, securityStatus=True)
|
||||
if download:
|
||||
model_path = DiffusionPipeline.download(
|
||||
search_word,
|
||||
@@ -503,13 +643,6 @@ def search_huggingface(search_word: str, **kwargs) -> Union[str, SearchResult, N
|
||||
)
|
||||
model_dicts = [asdict(value) for value in list(hf_models)]
|
||||
|
||||
file_list = []
|
||||
hf_repo_info = {}
|
||||
hf_security_info = {}
|
||||
model_path = ""
|
||||
repo_id, file_name = "", ""
|
||||
diffusers_model_exists = False
|
||||
|
||||
# Loop through models to find a suitable candidate
|
||||
for repo_info in model_dicts:
|
||||
repo_id = repo_info["id"]
|
||||
@@ -523,7 +656,10 @@ def search_huggingface(search_word: str, **kwargs) -> Union[str, SearchResult, N
|
||||
if hf_security_info["scansDone"]:
|
||||
for info in repo_info["siblings"]:
|
||||
file_path = info["rfilename"]
|
||||
if "model_index.json" == file_path and checkpoint_format in ["diffusers", "all"]:
|
||||
if "model_index.json" == file_path and checkpoint_format in [
|
||||
"diffusers",
|
||||
"all",
|
||||
]:
|
||||
diffusers_model_exists = True
|
||||
break
|
||||
|
||||
@@ -571,6 +707,10 @@ def search_huggingface(search_word: str, **kwargs) -> Union[str, SearchResult, N
|
||||
force_download=force_download,
|
||||
)
|
||||
|
||||
# `pathlib.PosixPath` may be returned
|
||||
if model_path:
|
||||
model_path = str(model_path)
|
||||
|
||||
if file_name:
|
||||
download_url = f"https://huggingface.co/{repo_id}/blob/main/{file_name}"
|
||||
else:
|
||||
@@ -586,10 +726,12 @@ def search_huggingface(search_word: str, **kwargs) -> Union[str, SearchResult, N
|
||||
repo_status=RepoStatus(repo_id=repo_id, repo_hash=hf_repo_info.sha, version=revision),
|
||||
model_status=ModelStatus(
|
||||
search_word=search_word,
|
||||
site_url=download_url,
|
||||
download_url=download_url,
|
||||
file_name=file_name,
|
||||
local=download,
|
||||
),
|
||||
extra_status=ExtraStatus(trained_words=None),
|
||||
)
|
||||
|
||||
else:
|
||||
@@ -605,6 +747,8 @@ def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]
|
||||
The search query string.
|
||||
model_type (`str`, *optional*, defaults to `Checkpoint`):
|
||||
The type of model to search for.
|
||||
sort (`str`, *optional*):
|
||||
The order in which you wish to sort the results (for example, `Highest Rated`, `Most Downloaded`, `Newest`).
|
||||
base_model (`str`, *optional*):
|
||||
The base model to filter by.
|
||||
download (`bool`, *optional*, defaults to `False`):
|
||||
@@ -628,6 +772,7 @@ def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]
|
||||
|
||||
# Extract additional parameters from kwargs
|
||||
model_type = kwargs.pop("model_type", "Checkpoint")
|
||||
sort = kwargs.pop("sort", None)
|
||||
download = kwargs.pop("download", False)
|
||||
base_model = kwargs.pop("base_model", None)
|
||||
force_download = kwargs.pop("force_download", False)
|
||||
@@ -642,6 +787,7 @@ def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]
|
||||
repo_name = ""
|
||||
repo_id = ""
|
||||
version_id = ""
|
||||
trainedWords = ""
|
||||
models_list = []
|
||||
selected_repo = {}
|
||||
selected_model = {}
|
||||
@@ -652,12 +798,16 @@ def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]
|
||||
params = {
|
||||
"query": search_word,
|
||||
"types": model_type,
|
||||
"sort": "Most Downloaded",
|
||||
"limit": 20,
|
||||
}
|
||||
if base_model is not None:
|
||||
if not isinstance(base_model, list):
|
||||
base_model = [base_model]
|
||||
params["baseModel"] = base_model
|
||||
|
||||
if sort is not None:
|
||||
params["sort"] = sort
|
||||
|
||||
headers = {}
|
||||
if token:
|
||||
headers["Authorization"] = f"Bearer {token}"
|
||||
@@ -686,25 +836,30 @@ def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]
|
||||
|
||||
# Sort versions within the selected repo by download count
|
||||
sorted_versions = sorted(
|
||||
selected_repo["modelVersions"], key=lambda x: x["stats"]["downloadCount"], reverse=True
|
||||
selected_repo["modelVersions"],
|
||||
key=lambda x: x["stats"]["downloadCount"],
|
||||
reverse=True,
|
||||
)
|
||||
for selected_version in sorted_versions:
|
||||
version_id = selected_version["id"]
|
||||
trainedWords = selected_version["trainedWords"]
|
||||
models_list = []
|
||||
for model_data in selected_version["files"]:
|
||||
# Check if the file passes security scans and has a valid extension
|
||||
file_name = model_data["name"]
|
||||
if (
|
||||
model_data["pickleScanResult"] == "Success"
|
||||
and model_data["virusScanResult"] == "Success"
|
||||
and any(file_name.endswith(ext) for ext in EXTENSION)
|
||||
and os.path.basename(os.path.dirname(file_name)) not in DIFFUSERS_CONFIG_DIR
|
||||
):
|
||||
file_status = {
|
||||
"filename": file_name,
|
||||
"download_url": model_data["downloadUrl"],
|
||||
}
|
||||
models_list.append(file_status)
|
||||
# When searching for textual inversion, results other than the values entered for the base model may come up, so check again.
|
||||
if base_model is None or selected_version["baseModel"] in base_model:
|
||||
for model_data in selected_version["files"]:
|
||||
# Check if the file passes security scans and has a valid extension
|
||||
file_name = model_data["name"]
|
||||
if (
|
||||
model_data["pickleScanResult"] == "Success"
|
||||
and model_data["virusScanResult"] == "Success"
|
||||
and any(file_name.endswith(ext) for ext in EXTENSION)
|
||||
and os.path.basename(os.path.dirname(file_name)) not in DIFFUSERS_CONFIG_DIR
|
||||
):
|
||||
file_status = {
|
||||
"filename": file_name,
|
||||
"download_url": model_data["downloadUrl"],
|
||||
}
|
||||
models_list.append(file_status)
|
||||
|
||||
if models_list:
|
||||
# Sort the models list by filename and find the safest model
|
||||
@@ -764,19 +919,229 @@ def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]
|
||||
repo_status=RepoStatus(repo_id=repo_name, repo_hash=repo_id, version=version_id),
|
||||
model_status=ModelStatus(
|
||||
search_word=search_word,
|
||||
site_url=f"https://civitai.com/models/{repo_id}?modelVersionId={version_id}",
|
||||
download_url=download_url,
|
||||
file_name=file_name,
|
||||
local=output_info["type"]["local"],
|
||||
),
|
||||
extra_status=ExtraStatus(trained_words=trainedWords or None),
|
||||
)
|
||||
|
||||
|
||||
def add_methods(pipeline):
|
||||
r"""
|
||||
Add methods from `AutoConfig` to the pipeline.
|
||||
|
||||
Parameters:
|
||||
pipeline (`Pipeline`):
|
||||
The pipeline to which the methods will be added.
|
||||
"""
|
||||
for attr_name in dir(AutoConfig):
|
||||
attr_value = getattr(AutoConfig, attr_name)
|
||||
if callable(attr_value) and not attr_name.startswith("__"):
|
||||
setattr(pipeline, attr_name, types.MethodType(attr_value, pipeline))
|
||||
return pipeline
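# Hedged usage sketch (the pipeline object is illustrative): once `add_methods` has run,
# the `AutoConfig` helpers are available as bound methods on the pipeline, e.g.
#
#     pipe = add_methods(pipe)
#     pipe.auto_load_textual_inversion("EasyNegative", token="EasyNegative")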
|
||||
|
||||
|
||||
class AutoConfig:
|
||||
def auto_load_textual_inversion(
|
||||
self,
|
||||
pretrained_model_name_or_path: Union[str, List[str]],
|
||||
token: Optional[Union[str, List[str]]] = None,
|
||||
base_model: Optional[Union[str, List[str]]] = None,
|
||||
tokenizer=None,
|
||||
text_encoder=None,
|
||||
**kwargs,
|
||||
):
|
||||
r"""
|
||||
Load Textual Inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both 🤗 Diffusers and
|
||||
Automatic1111 formats are supported).
|
||||
|
||||
Parameters:
|
||||
pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`):
|
||||
Can be either one of the following or a list of them:
|
||||
|
||||
- Search keywords for pretrained model (for example `EasyNegative`).
|
||||
- A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a
|
||||
pretrained model hosted on the Hub.
|
||||
- A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual
|
||||
inversion weights.
|
||||
- A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights.
|
||||
- A [torch state
|
||||
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
|
||||
|
||||
token (`str` or `List[str]`, *optional*):
|
||||
Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a
|
||||
list, then `token` must also be a list of equal length.
|
||||
text_encoder ([`~transformers.CLIPTextModel`], *optional*):
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
If not specified, the function will use `self.text_encoder`.
tokenizer ([`~transformers.CLIPTokenizer`], *optional*):
A `CLIPTokenizer` to tokenize text. If not specified, the function will use `self.tokenizer`.
|
||||
weight_name (`str`, *optional*):
|
||||
Name of a custom weight file. This should be used when:
|
||||
|
||||
- The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight
|
||||
name such as `text_inv.bin`.
|
||||
- The saved textual inversion file is in the Automatic1111 format.
|
||||
cache_dir (`Union[str, os.PathLike]`, *optional*):
|
||||
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
|
||||
is not used.
|
||||
force_download (`bool`, *optional*, defaults to `False`):
|
||||
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
|
||||
cached versions if they exist.
|
||||
|
||||
proxies (`Dict[str, str]`, *optional*):
|
||||
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
|
||||
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
|
||||
local_files_only (`bool`, *optional*, defaults to `False`):
|
||||
Whether to only load local model weights and configuration files or not. If set to `True`, the model
|
||||
won't be downloaded from the Hub.
|
||||
token (`str` or *bool*, *optional*):
|
||||
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
|
||||
`diffusers-cli login` (stored in `~/.huggingface`) is used.
|
||||
revision (`str`, *optional*, defaults to `"main"`):
|
||||
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
|
||||
allowed by Git.
|
||||
subfolder (`str`, *optional*, defaults to `""`):
|
||||
The subfolder location of a model file within a larger model repository on the Hub or locally.
|
||||
mirror (`str`, *optional*):
|
||||
Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
|
||||
guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
|
||||
information.
|
||||
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from auto_diffusers import EasyPipelineForText2Image
|
||||
|
||||
>>> pipeline = EasyPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
|
||||
>>> pipeline.auto_load_textual_inversion("EasyNegative", token="EasyNegative")
|
||||
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
```
|
||||
|
||||
"""
|
||||
# 1. Set tokenizer and text encoder
|
||||
tokenizer = tokenizer or getattr(self, "tokenizer", None)
|
||||
text_encoder = text_encoder or getattr(self, "text_encoder", None)
|
||||
|
||||
# Check if tokenizer and text encoder are provided
|
||||
if tokenizer is None or text_encoder is None:
|
||||
raise ValueError("Tokenizer and text encoder must be provided.")
|
||||
|
||||
# 2. Normalize inputs
|
||||
pretrained_model_name_or_paths = (
|
||||
[pretrained_model_name_or_path]
|
||||
if not isinstance(pretrained_model_name_or_path, list)
|
||||
else pretrained_model_name_or_path
|
||||
)
|
||||
|
||||
# 2.1 Normalize tokens
|
||||
tokens = [token] if not isinstance(token, list) else token
|
||||
if tokens[0] is None:
|
||||
tokens = tokens * len(pretrained_model_name_or_paths)
|
||||
|
||||
for check_token in tokens:
|
||||
# Check if token is already in tokenizer vocabulary
|
||||
if check_token in tokenizer.get_vocab():
|
||||
raise ValueError(
|
||||
f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder."
|
||||
)
|
||||
|
||||
expected_shape = text_encoder.get_input_embeddings().weight.shape[-1]  # Embedding dimension of the text encoder
|
||||
|
||||
for search_word in pretrained_model_name_or_paths:
|
||||
if isinstance(search_word, str):
|
||||
# Update kwargs to ensure the model is downloaded and parameters are included
|
||||
_status = {
|
||||
"download": True,
|
||||
"include_params": True,
|
||||
"skip_error": False,
|
||||
"model_type": "TextualInversion",
|
||||
}
|
||||
# Get base-model tags for textual inversions compatible with this text encoder.
# If the embedding dimension is 768, use tags for SD 1.x and SDXL.
# If the embedding dimension is 1024, use tags for SD 2.x.
|
||||
if expected_shape in TOKENIZER_SHAPE_MAP:
|
||||
# Retrieve the appropriate tags from the TOKENIZER_SHAPE_MAP based on the expected shape
|
||||
tags = TOKENIZER_SHAPE_MAP[expected_shape]
|
||||
if base_model is not None:
|
||||
if isinstance(base_model, list):
|
||||
tags.extend(base_model)
|
||||
else:
|
||||
tags.append(base_model)
|
||||
_status["base_model"] = tags
|
||||
|
||||
kwargs.update(_status)
|
||||
# Search for the model on Civitai and get the model status
|
||||
textual_inversion_path = search_civitai(search_word, **kwargs)
|
||||
logger.warning(
|
||||
f"textual_inversion_path: {search_word} -> {textual_inversion_path.model_status.site_url}"
|
||||
)
|
||||
|
||||
pretrained_model_name_or_paths[
|
||||
pretrained_model_name_or_paths.index(search_word)
|
||||
] = textual_inversion_path.model_path
|
||||
|
||||
self.load_textual_inversion(
|
||||
pretrained_model_name_or_paths, token=tokens, tokenizer=tokenizer, text_encoder=text_encoder, **kwargs
|
||||
)
|
||||
|
||||
def auto_load_lora_weights(
|
||||
self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs
|
||||
):
|
||||
r"""
|
||||
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
|
||||
`self.text_encoder`.
|
||||
|
||||
All kwargs are forwarded to `self.lora_state_dict`.
|
||||
|
||||
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is
|
||||
loaded.
|
||||
|
||||
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is
|
||||
loaded into `self.unet`.
|
||||
|
||||
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state
|
||||
dict is loaded into `self.text_encoder`.
|
||||
|
||||
Parameters:
|
||||
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
|
||||
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
|
||||
adapter_name (`str`, *optional*):
|
||||
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
|
||||
`default_{i}` where i is the total number of adapters being loaded.
|
||||
low_cpu_mem_usage (`bool`, *optional*):
|
||||
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
|
||||
weights.
|
||||
kwargs (`dict`, *optional*):
|
||||
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
|
||||
"""
|
||||
if isinstance(pretrained_model_name_or_path_or_dict, str):
|
||||
# Update kwargs to ensure the model is downloaded and parameters are included
|
||||
_status = {
|
||||
"download": True,
|
||||
"include_params": True,
|
||||
"skip_error": False,
|
||||
"model_type": "LORA",
|
||||
}
|
||||
kwargs.update(_status)
|
||||
# Search for the model on Civitai and get the model status
|
||||
lora_path = search_civitai(pretrained_model_name_or_path_or_dict, **kwargs)
|
||||
logger.warning(f"lora_path: {lora_path.model_status.site_url}")
|
||||
logger.warning(f"trained_words: {lora_path.extra_status.trained_words}")
|
||||
pretrained_model_name_or_path_or_dict = lora_path.model_path
|
||||
|
||||
self.load_lora_weights(pretrained_model_name_or_path_or_dict, adapter_name=adapter_name, **kwargs)
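# Hedged usage sketch (model name and trigger word are illustrative): the logged
# `trained_words` are the LoRA trigger words, so a typical call chain would be
#
#     pipe.auto_load_lora_weights("add_detail")
#     image = pipe("a portrait, add_detail").images[0]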
|
||||
|
||||
|
||||
class EasyPipelineForText2Image(AutoPipelineForText2Image):
|
||||
r"""
|
||||
|
||||
[`AutoPipelineForText2Image`] is a generic pipeline class that instantiates a text-to-image pipeline class. The
|
||||
[`EasyPipelineForText2Image`] is a generic pipeline class that instantiates a text-to-image pipeline class. The
|
||||
specific underlying pipeline class is automatically selected from either the
|
||||
[`~AutoPipelineForText2Image.from_pretrained`] or [`~AutoPipelineForText2Image.from_pipe`] methods.
|
||||
[`~EasyPipelineForText2Image.from_pretrained`], [`~EasyPipelineForText2Image.from_pipe`], [`~EasyPipelineForText2Image.from_huggingface`] or [`~EasyPipelineForText2Image.from_civitai`] methods.
|
||||
|
||||
This class cannot be instantiated using `__init__()` (throws an error).
|
||||
|
||||
@@ -891,9 +1256,9 @@ class EasyPipelineForText2Image(AutoPipelineForText2Image):
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from diffusers import AutoPipelineForText2Image
|
||||
>>> from auto_diffusers import EasyPipelineForText2Image
|
||||
|
||||
>>> pipeline = AutoPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> pipeline = EasyPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
```
|
||||
"""
|
||||
@@ -907,20 +1272,21 @@ class EasyPipelineForText2Image(AutoPipelineForText2Image):
|
||||
kwargs.update(_status)
|
||||
|
||||
# Search for the model on Hugging Face and get the model status
|
||||
hf_model_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {hf_model_status.model_status.download_url}")
|
||||
checkpoint_path = hf_model_status.model_path
|
||||
hf_checkpoint_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {hf_checkpoint_status.model_status.download_url}")
|
||||
checkpoint_path = hf_checkpoint_status.model_path
|
||||
|
||||
# Check the format of the model checkpoint
|
||||
if hf_model_status.checkpoint_format == "single_file":
|
||||
if hf_checkpoint_status.loading_method == "from_single_file":
|
||||
# Load the pipeline from a single file checkpoint
|
||||
return load_pipeline_from_single_file(
|
||||
pipeline = load_pipeline_from_single_file(
|
||||
pretrained_model_or_path=checkpoint_path,
|
||||
pipeline_mapping=SINGLE_FILE_CHECKPOINT_TEXT2IMAGE_PIPELINE_MAPPING,
|
||||
**kwargs,
|
||||
)
|
||||
else:
|
||||
return cls.from_pretrained(checkpoint_path, **kwargs)
|
||||
pipeline = cls.from_pretrained(checkpoint_path, **kwargs)
|
||||
return add_methods(pipeline)
|
||||
|
||||
@classmethod
|
||||
def from_civitai(cls, pretrained_model_link_or_path, **kwargs):
|
||||
@@ -999,9 +1365,9 @@ class EasyPipelineForText2Image(AutoPipelineForText2Image):
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from diffusers import AutoPipelineForText2Image
|
||||
>>> from auto_diffusers import EasyPipelineForText2Image
|
||||
|
||||
>>> pipeline = AutoPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> pipeline = EasyPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
```
|
||||
"""
|
||||
@@ -1015,24 +1381,25 @@ class EasyPipelineForText2Image(AutoPipelineForText2Image):
|
||||
kwargs.update(_status)
|
||||
|
||||
# Search for the model on Civitai and get the model status
|
||||
model_status = search_civitai(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {model_status.model_status.download_url}")
|
||||
checkpoint_path = model_status.model_path
|
||||
checkpoint_status = search_civitai(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {checkpoint_status.model_status.site_url}")
|
||||
checkpoint_path = checkpoint_status.model_path
|
||||
|
||||
# Load the pipeline from a single file checkpoint
|
||||
return load_pipeline_from_single_file(
|
||||
pipeline = load_pipeline_from_single_file(
|
||||
pretrained_model_or_path=checkpoint_path,
|
||||
pipeline_mapping=SINGLE_FILE_CHECKPOINT_TEXT2IMAGE_PIPELINE_MAPPING,
|
||||
**kwargs,
|
||||
)
|
||||
return add_methods(pipeline)
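# Hedged usage sketch for the Civitai path (the search word is illustrative):
#
#     pipe = EasyPipelineForText2Image.from_civitai("dreamshaper")
#     image = pipe("a castle in the clouds").images[0]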
|
||||
|
||||
|
||||
class EasyPipelineForImage2Image(AutoPipelineForImage2Image):
|
||||
r"""
|
||||
|
||||
[`AutoPipelineForImage2Image`] is a generic pipeline class that instantiates an image-to-image pipeline class. The
|
||||
[`EasyPipelineForImage2Image`] is a generic pipeline class that instantiates an image-to-image pipeline class. The
|
||||
specific underlying pipeline class is automatically selected from either the
|
||||
[`~AutoPipelineForImage2Image.from_pretrained`] or [`~AutoPipelineForImage2Image.from_pipe`] methods.
|
||||
[`~EasyPipelineForImage2Image.from_pretrained`], [`~EasyPipelineForImage2Image.from_pipe`], [`~EasyPipelineForImage2Image.from_huggingface`] or [`~EasyPipelineForImage2Image.from_civitai`] methods.
|
||||
|
||||
This class cannot be instantiated using `__init__()` (throws an error).
|
||||
|
||||
@@ -1147,10 +1514,10 @@ class EasyPipelineForImage2Image(AutoPipelineForImage2Image):
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from diffusers import AutoPipelineForText2Image
|
||||
>>> from auto_diffusers import EasyPipelineForImage2Image
|
||||
|
||||
>>> pipeline = AutoPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
>>> pipeline = EasyPipelineForImage2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt, image).images[0]
|
||||
```
|
||||
"""
|
||||
# Update kwargs to ensure the model is downloaded and parameters are included
|
||||
@@ -1163,20 +1530,22 @@ class EasyPipelineForImage2Image(AutoPipelineForImage2Image):
|
||||
kwargs.update(_parmas)
|
||||
|
||||
# Search for the model on Hugging Face and get the model status
|
||||
model_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {model_status.model_status.download_url}")
|
||||
checkpoint_path = model_status.model_path
|
||||
hf_checkpoint_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {hf_checkpoint_status.model_status.download_url}")
|
||||
checkpoint_path = hf_checkpoint_status.model_path
|
||||
|
||||
# Check the format of the model checkpoint
|
||||
if model_status.checkpoint_format == "single_file":
|
||||
if hf_checkpoint_status.loading_method == "from_single_file":
|
||||
# Load the pipeline from a single file checkpoint
|
||||
return load_pipeline_from_single_file(
|
||||
pipeline = load_pipeline_from_single_file(
|
||||
pretrained_model_or_path=checkpoint_path,
|
||||
pipeline_mapping=SINGLE_FILE_CHECKPOINT_IMAGE2IMAGE_PIPELINE_MAPPING,
|
||||
**kwargs,
|
||||
)
|
||||
else:
|
||||
return cls.from_pretrained(checkpoint_path, **kwargs)
|
||||
pipeline = cls.from_pretrained(checkpoint_path, **kwargs)
|
||||
|
||||
return add_methods(pipeline)
|
||||
|
||||
@classmethod
|
||||
def from_civitai(cls, pretrained_model_link_or_path, **kwargs):
|
||||
@@ -1255,10 +1624,10 @@ class EasyPipelineForImage2Image(AutoPipelineForImage2Image):
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from diffusers import AutoPipelineForText2Image
|
||||
>>> from auto_diffusers import EasyPipelineForImage2Image
|
||||
|
||||
>>> pipeline = AutoPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
>>> pipeline = EasyPipelineForImage2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt, image).images[0]
|
||||
```
|
||||
"""
|
||||
# Update kwargs to ensure the model is downloaded and parameters are included
|
||||
@@ -1271,24 +1640,25 @@ class EasyPipelineForImage2Image(AutoPipelineForImage2Image):
|
||||
kwargs.update(_status)
|
||||
|
||||
# Search for the model on Civitai and get the model status
|
||||
model_status = search_civitai(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {model_status.model_status.download_url}")
|
||||
checkpoint_path = model_status.model_path
|
||||
checkpoint_status = search_civitai(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {checkpoint_status.model_status.site_url}")
|
||||
checkpoint_path = checkpoint_status.model_path
|
||||
|
||||
# Load the pipeline from a single file checkpoint
|
||||
return load_pipeline_from_single_file(
|
||||
pipeline = load_pipeline_from_single_file(
|
||||
pretrained_model_or_path=checkpoint_path,
|
||||
pipeline_mapping=SINGLE_FILE_CHECKPOINT_IMAGE2IMAGE_PIPELINE_MAPPING,
|
||||
**kwargs,
|
||||
)
|
||||
return add_methods(pipeline)
|
||||
|
||||
|
||||
class EasyPipelineForInpainting(AutoPipelineForInpainting):
|
||||
r"""
|
||||
|
||||
[`AutoPipelineForInpainting`] is a generic pipeline class that instantiates an inpainting pipeline class. The
|
||||
[`EasyPipelineForInpainting`] is a generic pipeline class that instantiates an inpainting pipeline class. The
|
||||
specific underlying pipeline class is automatically selected from either the
|
||||
[`~AutoPipelineForInpainting.from_pretrained`] or [`~AutoPipelineForInpainting.from_pipe`] methods.
|
||||
[`~EasyPipelineForInpainting.from_pretrained`], [`~EasyPipelineForInpainting.from_pipe`], [`~EasyPipelineForInpainting.from_huggingface`] or [`~EasyPipelineForInpainting.from_civitai`] methods.
|
||||
|
||||
This class cannot be instantiated using `__init__()` (throws an error).
|
||||
|
||||
@@ -1403,10 +1773,10 @@ class EasyPipelineForInpainting(AutoPipelineForInpainting):
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from diffusers import AutoPipelineForText2Image
|
||||
>>> from auto_diffusers import EasyPipelineForInpainting
|
||||
|
||||
>>> pipeline = AutoPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
>>> pipeline = EasyPipelineForInpainting.from_huggingface("stable-diffusion-2-inpainting")
|
||||
>>> image = pipeline(prompt, image=init_image, mask_image=mask_image).images[0]
|
||||
```
|
||||
"""
|
||||
# Update kwargs to ensure the model is downloaded and parameters are included
|
||||
@@ -1419,20 +1789,21 @@ class EasyPipelineForInpainting(AutoPipelineForInpainting):
|
||||
kwargs.update(_status)
|
||||
|
||||
# Search for the model on Hugging Face and get the model status
|
||||
model_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {model_status.model_status.download_url}")
|
||||
checkpoint_path = model_status.model_path
|
||||
hf_checkpoint_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {hf_checkpoint_status.model_status.download_url}")
|
||||
checkpoint_path = hf_checkpoint_status.model_path
|
||||
|
||||
# Check the format of the model checkpoint
|
||||
if model_status.checkpoint_format == "single_file":
|
||||
if hf_checkpoint_status.loading_method == "from_single_file":
|
||||
# Load the pipeline from a single file checkpoint
|
||||
return load_pipeline_from_single_file(
|
||||
pipeline = load_pipeline_from_single_file(
|
||||
pretrained_model_or_path=checkpoint_path,
|
||||
pipeline_mapping=SINGLE_FILE_CHECKPOINT_INPAINT_PIPELINE_MAPPING,
|
||||
**kwargs,
|
||||
)
|
||||
else:
|
||||
return cls.from_pretrained(checkpoint_path, **kwargs)
|
||||
pipeline = cls.from_pretrained(checkpoint_path, **kwargs)
|
||||
return add_methods(pipeline)
|
||||
|
||||
@classmethod
|
||||
def from_civitai(cls, pretrained_model_link_or_path, **kwargs):
|
||||
@@ -1511,10 +1882,10 @@ class EasyPipelineForInpainting(AutoPipelineForInpainting):
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from diffusers import AutoPipelineForText2Image
|
||||
>>> from auto_diffusers import EasyPipelineForInpainting
|
||||
|
||||
>>> pipeline = AutoPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
>>> pipeline = EasyPipelineForInpainting.from_huggingface("stable-diffusion-2-inpainting")
|
||||
>>> image = pipeline(prompt, image=init_image, mask_image=mask_image).images[0]
|
||||
```
|
||||
"""
|
||||
# Update kwargs to ensure the model is downloaded and parameters are included
|
||||
@@ -1527,13 +1898,14 @@ class EasyPipelineForInpainting(AutoPipelineForInpainting):
|
||||
kwargs.update(_status)
|
||||
|
||||
# Search for the model on Civitai and get the model status
|
||||
model_status = search_civitai(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {model_status.model_status.download_url}")
|
||||
checkpoint_path = model_status.model_path
|
||||
checkpoint_status = search_civitai(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {checkpoint_status.model_status.site_url}")
|
||||
checkpoint_path = checkpoint_status.model_path
|
||||
|
||||
# Load the pipeline from a single file checkpoint
|
||||
return load_pipeline_from_single_file(
|
||||
pipeline = load_pipeline_from_single_file(
|
||||
pretrained_model_or_path=checkpoint_path,
|
||||
pipeline_mapping=SINGLE_FILE_CHECKPOINT_INPAINT_PIPELINE_MAPPING,
|
||||
**kwargs,
|
||||
)
|
||||
return add_methods(pipeline)
|
||||
|
||||
@@ -365,8 +365,8 @@ def parse_args():
|
||||
"--dream_training",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Use the DREAM training method, which makes training more efficient and accurate at the ",
|
||||
"expense of doing an extra forward pass. See: https://arxiv.org/abs/2312.00210",
|
||||
"Use the DREAM training method, which makes training more efficient and accurate at the "
|
||||
"expense of doing an extra forward pass. See: https://arxiv.org/abs/2312.00210"
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
|
||||
243
scripts/convert_cogview4_to_diffusers.py
Normal file
@@ -0,0 +1,243 @@
|
||||
"""
|
||||
Convert a CogView4 checkpoint from SAT (https://github.com/THUDM/SwissArmyTransformer) to the Diffusers format.
(Deprecated since 2025-02-07; this script will be removed in a later CogView4 version.)
|
||||
|
||||
This script converts a CogView4 checkpoint to the Diffusers format, which can then be used
|
||||
with the Diffusers library.
|
||||
|
||||
Example usage:
|
||||
python scripts/convert_cogview4_to_diffusers.py \
|
||||
--transformer_checkpoint_path 'your path/cogview4_6b/1/mp_rank_00_model_states.pt' \
|
||||
--vae_checkpoint_path 'your path/cogview4_6b/imagekl_ch16.pt' \
|
||||
--output_path "THUDM/CogView4-6B" \
|
||||
--dtype "bf16"
|
||||
|
||||
Arguments:
|
||||
--transformer_checkpoint_path: Path to Transformer state dict.
|
||||
--vae_checkpoint_path: Path to VAE state dict.
|
||||
--output_path: The path to save the converted model.
|
||||
--push_to_hub: Whether to push the converted checkpoint to the HF Hub or not. Defaults to `False`.
|
||||
--text_encoder_cache_dir: Cache directory where text encoder is located. Defaults to None, which means HF_HOME will be used
|
||||
--dtype: The dtype to save the model in (default: "bf16", options: "fp16", "bf16", "fp32"). If None, the dtype of the state dict is considered.
|
||||
|
||||
Default is "bf16" because CogView4 uses bfloat16 for Training.
|
||||
|
||||
Note: You must provide either --transformer_checkpoint_path or --vae_checkpoint_path.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
from contextlib import nullcontext
|
||||
|
||||
import torch
|
||||
from accelerate import init_empty_weights
|
||||
from transformers import GlmForCausalLM, PreTrainedTokenizerFast
|
||||
|
||||
from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler
|
||||
from diffusers.loaders.single_file_utils import convert_ldm_vae_checkpoint
|
||||
from diffusers.utils.import_utils import is_accelerate_available
|
||||
|
||||
|
||||
CTX = init_empty_weights if is_accelerate_available() else nullcontext
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--transformer_checkpoint_path", default=None, type=str)
|
||||
parser.add_argument("--vae_checkpoint_path", default=None, type=str)
|
||||
parser.add_argument("--output_path", required=True, type=str)
|
||||
parser.add_argument("--push_to_hub", action="store_true", default=False, help="Whether to push to HF Hub after saving")
|
||||
parser.add_argument("--text_encoder_cache_dir", type=str, default=None, help="Path to text encoder cache directory")
|
||||
parser.add_argument("--dtype", type=str, default="bf16")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
# This is specific to `AdaLayerNormContinuous`:
# the diffusers implementation splits the linear projection into (scale, shift), while CogView4 splits it into (shift, scale).
|
||||
def swap_scale_shift(weight, dim):
|
||||
shift, scale = weight.chunk(2, dim=dim)
new_weight = torch.cat([scale, shift], dim=dim)
|
||||
return new_weight
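# Quick sanity sketch (not used by the conversion): with a toy projection vector laid out
# as [shift_0, shift_1, scale_0, scale_1],
#
#     swap_scale_shift(torch.arange(4.0), dim=0)  # -> tensor([2., 3., 0., 1.])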
|
||||
|
||||
|
||||
def convert_cogview4_transformer_checkpoint_to_diffusers(ckpt_path):
|
||||
original_state_dict = torch.load(ckpt_path, map_location="cpu")
|
||||
original_state_dict = original_state_dict["module"]
|
||||
original_state_dict = {k.replace("model.diffusion_model.", ""): v for k, v in original_state_dict.items()}
|
||||
|
||||
new_state_dict = {}
|
||||
|
||||
# Convert patch_embed
|
||||
new_state_dict["patch_embed.proj.weight"] = original_state_dict.pop("mixins.patch_embed.proj.weight")
|
||||
new_state_dict["patch_embed.proj.bias"] = original_state_dict.pop("mixins.patch_embed.proj.bias")
|
||||
new_state_dict["patch_embed.text_proj.weight"] = original_state_dict.pop("mixins.patch_embed.text_proj.weight")
|
||||
new_state_dict["patch_embed.text_proj.bias"] = original_state_dict.pop("mixins.patch_embed.text_proj.bias")
|
||||
|
||||
# Convert time_condition_embed
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_1.weight"] = original_state_dict.pop(
|
||||
"time_embed.0.weight"
|
||||
)
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_1.bias"] = original_state_dict.pop(
|
||||
"time_embed.0.bias"
|
||||
)
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_2.weight"] = original_state_dict.pop(
|
||||
"time_embed.2.weight"
|
||||
)
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_2.bias"] = original_state_dict.pop(
|
||||
"time_embed.2.bias"
|
||||
)
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_1.weight"] = original_state_dict.pop(
|
||||
"label_emb.0.0.weight"
|
||||
)
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_1.bias"] = original_state_dict.pop(
|
||||
"label_emb.0.0.bias"
|
||||
)
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_2.weight"] = original_state_dict.pop(
|
||||
"label_emb.0.2.weight"
|
||||
)
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_2.bias"] = original_state_dict.pop(
|
||||
"label_emb.0.2.bias"
|
||||
)
|
||||
|
||||
# Convert the transformer blocks; CogView4 has 28 blocks
|
||||
for i in range(28):
|
||||
block_prefix = f"transformer_blocks.{i}."
|
||||
old_prefix = f"transformer.layers.{i}."
|
||||
adaln_prefix = f"mixins.adaln.adaln_modules.{i}."
|
||||
new_state_dict[block_prefix + "norm1.linear.weight"] = original_state_dict.pop(adaln_prefix + "1.weight")
|
||||
new_state_dict[block_prefix + "norm1.linear.bias"] = original_state_dict.pop(adaln_prefix + "1.bias")
|
||||
|
||||
qkv_weight = original_state_dict.pop(old_prefix + "attention.query_key_value.weight")
|
||||
qkv_bias = original_state_dict.pop(old_prefix + "attention.query_key_value.bias")
|
||||
q, k, v = qkv_weight.chunk(3, dim=0)
|
||||
q_bias, k_bias, v_bias = qkv_bias.chunk(3, dim=0)
|
||||
|
||||
new_state_dict[block_prefix + "attn1.to_q.weight"] = q
|
||||
new_state_dict[block_prefix + "attn1.to_q.bias"] = q_bias
|
||||
new_state_dict[block_prefix + "attn1.to_k.weight"] = k
|
||||
new_state_dict[block_prefix + "attn1.to_k.bias"] = k_bias
|
||||
new_state_dict[block_prefix + "attn1.to_v.weight"] = v
|
||||
new_state_dict[block_prefix + "attn1.to_v.bias"] = v_bias
|
||||
|
||||
new_state_dict[block_prefix + "attn1.to_out.0.weight"] = original_state_dict.pop(
|
||||
old_prefix + "attention.dense.weight"
|
||||
)
|
||||
new_state_dict[block_prefix + "attn1.to_out.0.bias"] = original_state_dict.pop(
|
||||
old_prefix + "attention.dense.bias"
|
||||
)
|
||||
|
||||
new_state_dict[block_prefix + "ff.net.0.proj.weight"] = original_state_dict.pop(
|
||||
old_prefix + "mlp.dense_h_to_4h.weight"
|
||||
)
|
||||
new_state_dict[block_prefix + "ff.net.0.proj.bias"] = original_state_dict.pop(
|
||||
old_prefix + "mlp.dense_h_to_4h.bias"
|
||||
)
|
||||
new_state_dict[block_prefix + "ff.net.2.weight"] = original_state_dict.pop(
|
||||
old_prefix + "mlp.dense_4h_to_h.weight"
|
||||
)
|
||||
new_state_dict[block_prefix + "ff.net.2.bias"] = original_state_dict.pop(old_prefix + "mlp.dense_4h_to_h.bias")
|
||||
|
||||
# Convert final norm and projection
|
||||
new_state_dict["norm_out.linear.weight"] = swap_scale_shift(
|
||||
original_state_dict.pop("mixins.final_layer.adaln.1.weight"), dim=0
|
||||
)
|
||||
new_state_dict["norm_out.linear.bias"] = swap_scale_shift(
|
||||
original_state_dict.pop("mixins.final_layer.adaln.1.bias"), dim=0
|
||||
)
|
||||
new_state_dict["proj_out.weight"] = original_state_dict.pop("mixins.final_layer.linear.weight")
|
||||
new_state_dict["proj_out.bias"] = original_state_dict.pop("mixins.final_layer.linear.bias")
|
||||
|
||||
return new_state_dict
|
||||
|
||||
|
||||
def convert_cogview4_vae_checkpoint_to_diffusers(ckpt_path, vae_config):
|
||||
original_state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
|
||||
return convert_ldm_vae_checkpoint(original_state_dict, vae_config)
|
||||
|
||||
|
||||
def main(args):
|
||||
if args.dtype == "fp16":
|
||||
dtype = torch.float16
|
||||
elif args.dtype == "bf16":
|
||||
dtype = torch.bfloat16
|
||||
elif args.dtype == "fp32":
|
||||
dtype = torch.float32
|
||||
else:
|
||||
raise ValueError(f"Unsupported dtype: {args.dtype}")
|
||||
|
||||
transformer = None
|
||||
vae = None
|
||||
|
||||
if args.transformer_checkpoint_path is not None:
|
||||
converted_transformer_state_dict = convert_cogview4_transformer_checkpoint_to_diffusers(
|
||||
args.transformer_checkpoint_path
|
||||
)
|
||||
transformer = CogView4Transformer2DModel(
|
||||
patch_size=2,
|
||||
in_channels=16,
|
||||
num_layers=28,
|
||||
attention_head_dim=128,
|
||||
num_attention_heads=32,
|
||||
out_channels=16,
|
||||
text_embed_dim=4096,
|
||||
time_embed_dim=512,
|
||||
condition_dim=256,
|
||||
pos_embed_max_size=128,
|
||||
)
|
||||
transformer.load_state_dict(converted_transformer_state_dict, strict=True)
|
||||
if dtype is not None:
|
||||
# Cast to the requested dtype (if dtype were None, the original checkpoint dtype would be preserved)
|
||||
transformer = transformer.to(dtype=dtype)
|
||||
|
||||
if args.vae_checkpoint_path is not None:
|
||||
vae_config = {
|
||||
"in_channels": 3,
|
||||
"out_channels": 3,
|
||||
"down_block_types": ("DownEncoderBlock2D",) * 4,
|
||||
"up_block_types": ("UpDecoderBlock2D",) * 4,
|
||||
"block_out_channels": (128, 512, 1024, 1024),
|
||||
"layers_per_block": 3,
|
||||
"act_fn": "silu",
|
||||
"latent_channels": 16,
|
||||
"norm_num_groups": 32,
|
||||
"sample_size": 1024,
|
||||
"scaling_factor": 1.0,
|
||||
"force_upcast": True,
|
||||
"use_quant_conv": False,
|
||||
"use_post_quant_conv": False,
|
||||
"mid_block_add_attention": False,
|
||||
}
|
||||
converted_vae_state_dict = convert_cogview4_vae_checkpoint_to_diffusers(args.vae_checkpoint_path, vae_config)
|
||||
vae = AutoencoderKL(**vae_config)
|
||||
vae.load_state_dict(converted_vae_state_dict, strict=True)
|
||||
if dtype is not None:
|
||||
vae = vae.to(dtype=dtype)
|
||||
|
||||
text_encoder_id = "THUDM/glm-4-9b-hf"
|
||||
tokenizer = PreTrainedTokenizerFast.from_pretrained(text_encoder_id)
|
||||
text_encoder = GlmForCausalLM.from_pretrained(
|
||||
text_encoder_id,
|
||||
cache_dir=args.text_encoder_cache_dir,
|
||||
torch_dtype=torch.bfloat16 if args.dtype == "bf16" else torch.float32,
|
||||
)
|
||||
|
||||
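# Make every parameter contiguous, since safetensors serialization (used by save_pretrained below) requires contiguous tensors.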
for param in text_encoder.parameters():
|
||||
param.data = param.data.contiguous()
|
||||
|
||||
scheduler = FlowMatchEulerDiscreteScheduler(
|
||||
base_shift=0.25, max_shift=0.75, base_image_seq_len=256, use_dynamic_shifting=True, time_shift_type="linear"
|
||||
)
|
||||
|
||||
pipe = CogView4Pipeline(
|
||||
tokenizer=tokenizer,
|
||||
text_encoder=text_encoder,
|
||||
vae=vae,
|
||||
transformer=transformer,
|
||||
scheduler=scheduler,
|
||||
)
|
||||
|
||||
# Sharding the weights (max_shard_size) helps users with limited memory, such as those on Colab
# or in notebooks, since smaller shards reduce peak memory during model loading.
|
||||
pipe.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB", push_to_hub=args.push_to_hub)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(args)
|
||||
366
scripts/convert_cogview4_to_diffusers_megatron.py
Normal file
@@ -0,0 +1,366 @@
|
||||
"""
|
||||
Convert a CogView4 checkpoint from Megatron to the Diffusers format.
|
||||
|
||||
Example usage:
|
||||
python scripts/convert_cogview4_to_diffusers_megatron.py \
|
||||
--transformer_checkpoint_path 'your path/cogview4_6b/mp_rank_00/model_optim_rng.pt' \
|
||||
--vae_checkpoint_path 'your path/cogview4_6b/imagekl_ch16.pt' \
|
||||
--output_path "THUDM/CogView4-6B" \
|
||||
--dtype "bf16"
|
||||
|
||||
Arguments:
|
||||
--transformer_checkpoint_path: Path to Transformer state dict.
|
||||
--vae_checkpoint_path: Path to VAE state dict.
|
||||
--output_path: The path to save the converted model.
|
||||
--push_to_hub: Whether to push the converted checkpoint to the HF Hub or not. Defaults to `False`.
|
||||
--text_encoder_cache_dir: Cache directory where text encoder is located. Defaults to None, which means HF_HOME will be used.
|
||||
--dtype: The dtype to save the model in (default: "bf16", options: "fp16", "bf16", "fp32"). If None, the dtype of the state dict is considered.
|
||||
|
||||
Default is "bf16" because CogView4 uses bfloat16 for training.
|
||||
|
||||
Note: You must provide either --transformer_checkpoint_path or --vae_checkpoint_path.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
|
||||
import torch
|
||||
from tqdm import tqdm
|
||||
from transformers import GlmForCausalLM, PreTrainedTokenizerFast
|
||||
|
||||
from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler
|
||||
from diffusers.loaders.single_file_utils import convert_ldm_vae_checkpoint
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--transformer_checkpoint_path",
|
||||
default=None,
|
||||
type=str,
|
||||
help="Path to Megatron (not SAT) Transformer checkpoint, e.g., 'model_optim_rng.pt'.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--vae_checkpoint_path",
|
||||
default=None,
|
||||
type=str,
|
||||
help="(Optional) Path to VAE checkpoint, e.g., 'imagekl_ch16.pt'.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--output_path",
|
||||
required=True,
|
||||
type=str,
|
||||
help="Directory to save the final Diffusers format pipeline.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--push_to_hub",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Whether to push the converted model to the HuggingFace Hub.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--text_encoder_cache_dir",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Specify the cache directory for the text encoder.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--dtype",
|
||||
type=str,
|
||||
default="bf16",
|
||||
choices=["fp16", "bf16", "fp32"],
|
||||
help="Data type to save the model in.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--num_layers",
|
||||
type=int,
|
||||
default=28,
|
||||
help="Number of Transformer layers (e.g., 28, 48...).",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--num_heads",
|
||||
type=int,
|
||||
default=32,
|
||||
help="Number of attention heads.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--hidden_size",
|
||||
type=int,
|
||||
default=4096,
|
||||
help="Transformer hidden dimension size.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--attention_head_dim",
|
||||
type=int,
|
||||
default=128,
|
||||
help="Dimension of each attention head.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--time_embed_dim",
|
||||
type=int,
|
||||
default=512,
|
||||
help="Dimension of time embeddings.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--condition_dim",
|
||||
type=int,
|
||||
default=256,
|
||||
help="Dimension of condition embeddings.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--pos_embed_max_size",
|
||||
type=int,
|
||||
default=128,
|
||||
help="Maximum size for positional embeddings.",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
def swap_scale_shift(weight, dim):
|
||||
"""
|
||||
Swap the scale and shift components in the weight tensor.
|
||||
|
||||
Args:
|
||||
weight (torch.Tensor): The original weight tensor.
|
||||
dim (int): The dimension along which to split.
|
||||
|
||||
Returns:
|
||||
torch.Tensor: The modified weight tensor with scale and shift swapped.
|
||||
"""
|
||||
shift, scale = weight.chunk(2, dim=dim)
|
||||
new_weight = torch.cat([scale, shift], dim=dim)
|
||||
return new_weight
|
||||
|
||||
|
||||
def convert_megatron_transformer_checkpoint_to_diffusers(
|
||||
ckpt_path: str,
|
||||
num_layers: int,
|
||||
num_heads: int,
|
||||
hidden_size: int,
|
||||
):
|
||||
"""
|
||||
Convert a Megatron Transformer checkpoint to Diffusers format.
|
||||
|
||||
Args:
|
||||
ckpt_path (str): Path to the Megatron Transformer checkpoint.
|
||||
num_layers (int): Number of Transformer layers.
|
||||
num_heads (int): Number of attention heads.
|
||||
hidden_size (int): Hidden size of the Transformer.
|
||||
|
||||
Returns:
|
||||
dict: The converted state dictionary compatible with Diffusers.
|
||||
"""
|
||||
ckpt = torch.load(ckpt_path, map_location="cpu")
|
||||
mega = ckpt["model"]
|
||||
|
||||
new_state_dict = {}
|
||||
|
||||
# Patch Embedding
|
||||
new_state_dict["patch_embed.proj.weight"] = mega["encoder_expand_linear.weight"].reshape(hidden_size, 64)
|
||||
new_state_dict["patch_embed.proj.bias"] = mega["encoder_expand_linear.bias"]
|
||||
new_state_dict["patch_embed.text_proj.weight"] = mega["text_projector.weight"]
|
||||
new_state_dict["patch_embed.text_proj.bias"] = mega["text_projector.bias"]
|
||||
|
||||
# Time Condition Embedding
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_1.weight"] = mega[
|
||||
"time_embedding.time_embed.0.weight"
|
||||
]
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_1.bias"] = mega["time_embedding.time_embed.0.bias"]
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_2.weight"] = mega[
|
||||
"time_embedding.time_embed.2.weight"
|
||||
]
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_2.bias"] = mega["time_embedding.time_embed.2.bias"]
|
||||
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_1.weight"] = mega[
|
||||
"label_embedding.label_embed.0.weight"
|
||||
]
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_1.bias"] = mega[
|
||||
"label_embedding.label_embed.0.bias"
|
||||
]
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_2.weight"] = mega[
|
||||
"label_embedding.label_embed.2.weight"
|
||||
]
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_2.bias"] = mega[
|
||||
"label_embedding.label_embed.2.bias"
|
||||
]
|
||||
|
||||
# Convert each Transformer layer
|
||||
for i in tqdm(range(num_layers), desc="Converting layers (Megatron->Diffusers)"):
|
||||
block_prefix = f"transformer_blocks.{i}."
|
||||
|
||||
# AdaLayerNorm
|
||||
new_state_dict[block_prefix + "norm1.linear.weight"] = swap_scale_shift(
|
||||
mega[f"decoder.layers.{i}.adaln.weight"], dim=0
|
||||
)
|
||||
new_state_dict[block_prefix + "norm1.linear.bias"] = swap_scale_shift(
|
||||
mega[f"decoder.layers.{i}.adaln.bias"], dim=0
|
||||
)
|
||||
|
||||
# QKV
|
||||
qkv_weight = mega[f"decoder.layers.{i}.self_attention.linear_qkv.weight"]
|
||||
qkv_bias = mega[f"decoder.layers.{i}.self_attention.linear_qkv.bias"]
|
||||
|
||||
# Reshape to match SAT logic
|
||||
qkv_weight = qkv_weight.view(num_heads, 3, hidden_size // num_heads, hidden_size)
|
||||
qkv_weight = qkv_weight.permute(1, 0, 2, 3).reshape(3 * hidden_size, hidden_size)
|
||||
|
||||
qkv_bias = qkv_bias.view(num_heads, 3, hidden_size // num_heads)
|
||||
qkv_bias = qkv_bias.permute(1, 0, 2).reshape(3 * hidden_size)
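# Worked shape check (toy values, not executed here):
#
#     H, h = 4096, 32                                  # hidden_size, num_heads
#     w = torch.randn(3 * H, H)                        # fused Megatron QKV weight
#     w = w.view(h, 3, H // h, H).permute(1, 0, 2, 3).reshape(3 * H, H)
#     q, k, v = torch.chunk(w, 3, dim=0)               # each [H, H], as Diffusers expects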
|
||||
|
||||
# Assign to Diffusers keys
|
||||
q, k, v = torch.chunk(qkv_weight, 3, dim=0)
|
||||
qb, kb, vb = torch.chunk(qkv_bias, 3, dim=0)
|
||||
|
||||
new_state_dict[block_prefix + "attn1.to_q.weight"] = q
|
||||
new_state_dict[block_prefix + "attn1.to_q.bias"] = qb
|
||||
new_state_dict[block_prefix + "attn1.to_k.weight"] = k
|
||||
new_state_dict[block_prefix + "attn1.to_k.bias"] = kb
|
||||
new_state_dict[block_prefix + "attn1.to_v.weight"] = v
|
||||
new_state_dict[block_prefix + "attn1.to_v.bias"] = vb
|
||||
|
||||
# Attention Output
|
||||
new_state_dict[block_prefix + "attn1.to_out.0.weight"] = mega[
|
||||
f"decoder.layers.{i}.self_attention.linear_proj.weight"
|
||||
].T
|
||||
new_state_dict[block_prefix + "attn1.to_out.0.bias"] = mega[
|
||||
f"decoder.layers.{i}.self_attention.linear_proj.bias"
|
||||
]
|
||||
|
||||
# MLP
|
||||
new_state_dict[block_prefix + "ff.net.0.proj.weight"] = mega[f"decoder.layers.{i}.mlp.linear_fc1.weight"]
|
||||
new_state_dict[block_prefix + "ff.net.0.proj.bias"] = mega[f"decoder.layers.{i}.mlp.linear_fc1.bias"]
|
||||
new_state_dict[block_prefix + "ff.net.2.weight"] = mega[f"decoder.layers.{i}.mlp.linear_fc2.weight"]
|
||||
new_state_dict[block_prefix + "ff.net.2.bias"] = mega[f"decoder.layers.{i}.mlp.linear_fc2.bias"]
|
||||
|
||||
# Final Layers
|
||||
new_state_dict["norm_out.linear.weight"] = swap_scale_shift(mega["adaln_final.weight"], dim=0)
|
||||
new_state_dict["norm_out.linear.bias"] = swap_scale_shift(mega["adaln_final.bias"], dim=0)
|
||||
new_state_dict["proj_out.weight"] = mega["output_projector.weight"]
|
||||
new_state_dict["proj_out.bias"] = mega["output_projector.bias"]
|
||||
|
||||
return new_state_dict
|
||||
|
||||
|
||||
def convert_cogview4_vae_checkpoint_to_diffusers(ckpt_path, vae_config):
|
||||
"""
|
||||
Convert a CogView4 VAE checkpoint to Diffusers format.
|
||||
|
||||
Args:
|
||||
ckpt_path (str): Path to the VAE checkpoint.
|
||||
vae_config (dict): Configuration dictionary for the VAE.
|
||||
|
||||
Returns:
|
||||
dict: The converted VAE state dictionary compatible with Diffusers.
|
||||
"""
|
||||
original_state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
|
||||
return convert_ldm_vae_checkpoint(original_state_dict, vae_config)
|
||||
|
||||
|
||||
def main(args):
|
||||
"""
|
||||
Main function to convert CogView4 checkpoints to Diffusers format.
|
||||
|
||||
Args:
|
||||
args (argparse.Namespace): Parsed command-line arguments.
|
||||
"""
|
||||
# Determine the desired data type
|
||||
if args.dtype == "fp16":
|
||||
dtype = torch.float16
|
||||
elif args.dtype == "bf16":
|
||||
dtype = torch.bfloat16
|
||||
elif args.dtype == "fp32":
|
||||
dtype = torch.float32
|
||||
else:
|
||||
raise ValueError(f"Unsupported dtype: {args.dtype}")
|
||||
|
||||
transformer = None
|
||||
vae = None
|
||||
|
||||
# Convert Transformer checkpoint if provided
|
||||
if args.transformer_checkpoint_path is not None:
|
||||
converted_transformer_state_dict = convert_megatron_transformer_checkpoint_to_diffusers(
|
||||
ckpt_path=args.transformer_checkpoint_path,
|
||||
num_layers=args.num_layers,
|
||||
num_heads=args.num_heads,
|
||||
hidden_size=args.hidden_size,
|
||||
)
|
||||
transformer = CogView4Transformer2DModel(
|
||||
patch_size=2,
|
||||
in_channels=16,
|
||||
num_layers=args.num_layers,
|
||||
attention_head_dim=args.attention_head_dim,
|
||||
num_attention_heads=args.num_heads,
|
||||
out_channels=16,
|
||||
text_embed_dim=args.hidden_size,
|
||||
time_embed_dim=args.time_embed_dim,
|
||||
condition_dim=args.condition_dim,
|
||||
pos_embed_max_size=args.pos_embed_max_size,
|
||||
)
|
||||
|
||||
transformer.load_state_dict(converted_transformer_state_dict, strict=True)
|
||||
|
||||
# Convert to the specified dtype
|
||||
if dtype is not None:
|
||||
transformer = transformer.to(dtype=dtype)
|
||||
|
||||
# Convert VAE checkpoint if provided
|
||||
if args.vae_checkpoint_path is not None:
|
||||
vae_config = {
|
||||
"in_channels": 3,
|
||||
"out_channels": 3,
|
||||
"down_block_types": ("DownEncoderBlock2D",) * 4,
|
||||
"up_block_types": ("UpDecoderBlock2D",) * 4,
|
||||
"block_out_channels": (128, 512, 1024, 1024),
|
||||
"layers_per_block": 3,
|
||||
"act_fn": "silu",
|
||||
"latent_channels": 16,
|
||||
"norm_num_groups": 32,
|
||||
"sample_size": 1024,
|
||||
"scaling_factor": 1.0,
|
||||
"force_upcast": True,
|
||||
"use_quant_conv": False,
|
||||
"use_post_quant_conv": False,
|
||||
"mid_block_add_attention": False,
|
||||
}
|
||||
converted_vae_state_dict = convert_cogview4_vae_checkpoint_to_diffusers(args.vae_checkpoint_path, vae_config)
|
||||
vae = AutoencoderKL(**vae_config)
|
||||
vae.load_state_dict(converted_vae_state_dict, strict=True)
|
||||
if dtype is not None:
|
||||
vae = vae.to(dtype=dtype)
|
||||
|
||||
# Load the text encoder and tokenizer
|
||||
text_encoder_id = "THUDM/glm-4-9b-hf"
|
||||
tokenizer = PreTrainedTokenizerFast.from_pretrained(text_encoder_id)
|
||||
text_encoder = GlmForCausalLM.from_pretrained(
|
||||
text_encoder_id,
|
||||
cache_dir=args.text_encoder_cache_dir,
|
||||
torch_dtype=torch.bfloat16 if args.dtype == "bf16" else torch.float32,
|
||||
)
|
||||
for param in text_encoder.parameters():
|
||||
param.data = param.data.contiguous()
|
||||
|
||||
# Initialize the scheduler
|
||||
scheduler = FlowMatchEulerDiscreteScheduler(
|
||||
base_shift=0.25, max_shift=0.75, base_image_seq_len=256, use_dynamic_shifting=True, time_shift_type="linear"
|
||||
)
|
||||
|
||||
# Create the pipeline
|
||||
pipe = CogView4Pipeline(
|
||||
tokenizer=tokenizer,
|
||||
text_encoder=text_encoder,
|
||||
vae=vae,
|
||||
transformer=transformer,
|
||||
scheduler=scheduler,
|
||||
)
|
||||
|
||||
# Save the converted pipeline
|
||||
pipe.save_pretrained(
|
||||
args.output_path,
|
||||
safe_serialization=True,
|
||||
max_shard_size="5GB",
|
||||
push_to_hub=args.push_to_hub,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(args)
|
||||
203
scripts/convert_omnigen_to_diffusers.py
Normal file
@@ -0,0 +1,203 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import torch
|
||||
from huggingface_hub import snapshot_download
|
||||
from safetensors.torch import load_file
|
||||
from transformers import AutoTokenizer
|
||||
|
||||
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, OmniGenPipeline, OmniGenTransformer2DModel
|
||||
|
||||
|
||||
def main(args):
|
||||
# checkpoint from https://huggingface.co/Shitao/OmniGen-v1
|
||||
|
||||
if not os.path.exists(args.origin_ckpt_path):
|
||||
print("Model not found, downloading...")
|
||||
cache_folder = os.getenv("HF_HUB_CACHE")
|
||||
args.origin_ckpt_path = snapshot_download(
|
||||
repo_id=args.origin_ckpt_path,
|
||||
cache_dir=cache_folder,
|
||||
ignore_patterns=["flax_model.msgpack", "rust_model.ot", "tf_model.h5", "model.pt"],
|
||||
)
|
||||
print(f"Downloaded model to {args.origin_ckpt_path}")
|
||||
|
||||
ckpt = os.path.join(args.origin_ckpt_path, "model.safetensors")
|
||||
ckpt = load_file(ckpt, device="cpu")
|
||||
|
||||
mapping_dict = {
|
||||
"pos_embed": "patch_embedding.pos_embed",
|
||||
"x_embedder.proj.weight": "patch_embedding.output_image_proj.weight",
|
||||
"x_embedder.proj.bias": "patch_embedding.output_image_proj.bias",
|
||||
"input_x_embedder.proj.weight": "patch_embedding.input_image_proj.weight",
|
||||
"input_x_embedder.proj.bias": "patch_embedding.input_image_proj.bias",
|
||||
"final_layer.adaLN_modulation.1.weight": "norm_out.linear.weight",
|
||||
"final_layer.adaLN_modulation.1.bias": "norm_out.linear.bias",
|
||||
"final_layer.linear.weight": "proj_out.weight",
|
||||
"final_layer.linear.bias": "proj_out.bias",
|
||||
"time_token.mlp.0.weight": "time_token.linear_1.weight",
|
||||
"time_token.mlp.0.bias": "time_token.linear_1.bias",
|
||||
"time_token.mlp.2.weight": "time_token.linear_2.weight",
|
||||
"time_token.mlp.2.bias": "time_token.linear_2.bias",
|
||||
"t_embedder.mlp.0.weight": "t_embedder.linear_1.weight",
|
||||
"t_embedder.mlp.0.bias": "t_embedder.linear_1.bias",
|
||||
"t_embedder.mlp.2.weight": "t_embedder.linear_2.weight",
|
||||
"t_embedder.mlp.2.bias": "t_embedder.linear_2.bias",
|
||||
"llm.embed_tokens.weight": "embed_tokens.weight",
|
||||
}
|
||||
|
||||
converted_state_dict = {}
|
||||
for k, v in ckpt.items():
|
||||
if k in mapping_dict:
|
||||
converted_state_dict[mapping_dict[k]] = v
|
||||
elif "qkv" in k:
|
||||
to_q, to_k, to_v = v.chunk(3)
|
||||
converted_state_dict[f"layers.{k.split('.')[2]}.self_attn.to_q.weight"] = to_q
|
||||
converted_state_dict[f"layers.{k.split('.')[2]}.self_attn.to_k.weight"] = to_k
|
||||
converted_state_dict[f"layers.{k.split('.')[2]}.self_attn.to_v.weight"] = to_v
|
||||
elif "o_proj" in k:
|
||||
converted_state_dict[f"layers.{k.split('.')[2]}.self_attn.to_out.0.weight"] = v
|
||||
else:
|
||||
converted_state_dict[k[4:]] = v
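# Key-mapping sketch (the key name is illustrative): an entry such as
# "llm.layers.0.self_attn.qkv_proj.weight" is split above into
# "layers.0.self_attn.to_q/to_k/to_v.weight", while every other unmatched "llm.*"
# key simply drops the 4-character "llm." prefix via k[4:].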
|
||||
|
||||
transformer = OmniGenTransformer2DModel(
|
||||
rope_scaling={
|
||||
"long_factor": [
|
||||
1.0299999713897705,
|
||||
1.0499999523162842,
|
||||
1.0499999523162842,
|
||||
1.0799999237060547,
|
||||
1.2299998998641968,
|
||||
1.2299998998641968,
|
||||
1.2999999523162842,
|
||||
1.4499999284744263,
|
||||
1.5999999046325684,
|
||||
1.6499998569488525,
|
||||
1.8999998569488525,
|
||||
2.859999895095825,
|
||||
3.68999981880188,
|
||||
5.419999599456787,
|
||||
5.489999771118164,
|
||||
5.489999771118164,
|
||||
9.09000015258789,
|
||||
11.579999923706055,
|
||||
15.65999984741211,
|
||||
15.769999504089355,
|
||||
15.789999961853027,
|
||||
18.360000610351562,
|
||||
21.989999771118164,
|
||||
23.079999923706055,
|
||||
30.009998321533203,
|
||||
32.35000228881836,
|
||||
32.590003967285156,
|
||||
35.56000518798828,
|
||||
39.95000457763672,
|
||||
53.840003967285156,
|
||||
56.20000457763672,
|
||||
57.95000457763672,
|
||||
59.29000473022461,
|
||||
59.77000427246094,
|
||||
59.920005798339844,
|
||||
61.190006256103516,
|
||||
61.96000671386719,
|
||||
62.50000762939453,
|
||||
63.3700065612793,
|
||||
63.48000717163086,
|
||||
63.48000717163086,
|
||||
63.66000747680664,
|
||||
63.850006103515625,
|
||||
64.08000946044922,
|
||||
64.760009765625,
|
||||
64.80001068115234,
|
||||
64.81001281738281,
|
||||
64.81001281738281,
|
||||
],
|
||||
"short_factor": [
|
||||
1.05,
|
||||
1.05,
|
||||
1.05,
|
||||
1.1,
|
||||
1.1,
|
||||
1.1,
|
||||
1.2500000000000002,
|
||||
1.2500000000000002,
|
||||
1.4000000000000004,
|
||||
1.4500000000000004,
|
||||
1.5500000000000005,
|
||||
1.8500000000000008,
|
||||
1.9000000000000008,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.1000000000000005,
|
||||
2.1000000000000005,
|
||||
2.2,
|
||||
2.3499999999999996,
|
||||
2.3499999999999996,
|
||||
2.3499999999999996,
|
||||
2.3499999999999996,
|
||||
2.3999999999999995,
|
||||
2.3999999999999995,
|
||||
2.6499999999999986,
|
||||
2.6999999999999984,
|
||||
2.8999999999999977,
|
||||
2.9499999999999975,
|
||||
3.049999999999997,
|
||||
3.049999999999997,
|
||||
3.049999999999997,
|
||||
],
|
||||
"type": "su",
|
||||
},
|
||||
patch_size=2,
|
||||
in_channels=4,
|
||||
pos_embed_max_size=192,
|
||||
)
|
||||
transformer.load_state_dict(converted_state_dict, strict=True)
|
||||
transformer.to(torch.bfloat16)
|
||||
|
||||
num_model_params = sum(p.numel() for p in transformer.parameters())
|
||||
print(f"Total number of transformer parameters: {num_model_params}")
|
||||
|
||||
scheduler = FlowMatchEulerDiscreteScheduler(invert_sigmas=True, num_train_timesteps=1)
|
||||
|
||||
vae = AutoencoderKL.from_pretrained(os.path.join(args.origin_ckpt_path, "vae"), torch_dtype=torch.float32)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(args.origin_ckpt_path)
|
||||
|
||||
pipeline = OmniGenPipeline(tokenizer=tokenizer, transformer=transformer, vae=vae, scheduler=scheduler)
|
||||
pipeline.save_pretrained(args.dump_path)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
parser.add_argument(
|
||||
"--origin_ckpt_path",
|
||||
default="Shitao/OmniGen-v1",
|
||||
type=str,
|
||||
required=False,
|
||||
help="Path to the checkpoint to convert.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--dump_path", default="OmniGen-v1-diffusers", type=str, required=False, help="Path to the output pipeline."
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
main(args)
|
||||
7
setup.py
@@ -74,8 +74,9 @@ To create the package for PyPI.
|
||||
twine upload dist/* -r pypi
|
||||
|
||||
10. Prepare the release notes and publish them on GitHub once everything is looking hunky-dory. You can use the following
|
||||
Space to fetch all the commits applicable for the release: https://huggingface.co/spaces/lysandre/github-release. Repo should
|
||||
be `huggingface/diffusers`. `tag` should be the previous release tag (v0.26.1, for example), and `branch` should be
|
||||
Space to fetch all the commits applicable for the release: https://huggingface.co/spaces/sayakpaul/auto-release-notes-diffusers.
|
||||
It automatically fetches the correct tag and branch but also provides the option to configure them.
|
||||
`tag` should be the previous release tag (v0.26.1, for example), and `branch` should be
|
||||
the latest release branch (v0.27.0-release, for example). It denotes all commits that have happened on branch
|
||||
v0.27.0-release after the tag v0.26.1 was created.
|
||||
|
||||
@@ -130,6 +131,7 @@ _deps = [
|
||||
"regex!=2019.12.17",
|
||||
"requests",
|
||||
"tensorboard",
|
||||
"tiktoken>=0.7.0",
|
||||
"torch>=1.4",
|
||||
"torchvision",
|
||||
"transformers>=4.41.2",
|
||||
@@ -226,6 +228,7 @@ extras["test"] = deps_list(
|
||||
"safetensors",
|
||||
"sentencepiece",
|
||||
"scipy",
|
||||
"tiktoken",
|
||||
"torchvision",
|
||||
"transformers",
|
||||
"phonemizer",
|
||||
|
||||
@@ -101,6 +101,7 @@ else:
|
||||
"CacheMixin",
|
||||
"CogVideoXTransformer3DModel",
|
||||
"CogView3PlusTransformer2DModel",
|
||||
"CogView4Transformer2DModel",
|
||||
"ConsisIDTransformer3DModel",
|
||||
"ConsistencyDecoderVAE",
|
||||
"ControlNetModel",
|
||||
@@ -118,12 +119,14 @@ else:
|
||||
"Kandinsky3UNet",
|
||||
"LatteTransformer3DModel",
|
||||
"LTXVideoTransformer3DModel",
|
||||
"Lumina2Transformer2DModel",
|
||||
"LuminaNextDiT2DModel",
|
||||
"MochiTransformer3DModel",
|
||||
"ModelMixin",
|
||||
"MotionAdapter",
|
||||
"MultiAdapter",
|
||||
"MultiControlNetModel",
|
||||
"OmniGenTransformer2DModel",
|
||||
"PixArtTransformer2DModel",
|
||||
"PriorTransformer",
|
||||
"SanaTransformer2DModel",
|
||||
@@ -285,6 +288,7 @@ else:
|
||||
"CogVideoXPipeline",
|
||||
"CogVideoXVideoToVideoPipeline",
|
||||
"CogView3PlusPipeline",
|
||||
"CogView4Pipeline",
|
||||
"ConsisIDPipeline",
|
||||
"CycleDiffusionPipeline",
|
||||
"FluxControlImg2ImgPipeline",
|
||||
@@ -301,6 +305,7 @@ else:
|
||||
"HunyuanDiTControlNetPipeline",
|
||||
"HunyuanDiTPAGPipeline",
|
||||
"HunyuanDiTPipeline",
|
||||
"HunyuanSkyreelsImageToVideoPipeline",
|
||||
"HunyuanVideoPipeline",
|
||||
"I2VGenXLPipeline",
|
||||
"IFImg2ImgPipeline",
|
||||
@@ -337,11 +342,14 @@ else:
|
||||
"LEditsPPPipelineStableDiffusionXL",
|
||||
"LTXImageToVideoPipeline",
|
||||
"LTXPipeline",
|
||||
"Lumina2Text2ImgPipeline",
|
||||
"LuminaText2ImgPipeline",
|
||||
"MarigoldDepthPipeline",
|
||||
"MarigoldIntrinsicsPipeline",
|
||||
"MarigoldNormalsPipeline",
|
||||
"MochiPipeline",
|
||||
"MusicLDMPipeline",
|
||||
"OmniGenPipeline",
|
||||
"PaintByExamplePipeline",
|
||||
"PIAPipeline",
|
||||
"PixArtAlphaPipeline",
|
||||
@@ -615,6 +623,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
|
||||
CacheMixin,
|
||||
CogVideoXTransformer3DModel,
|
||||
CogView3PlusTransformer2DModel,
|
||||
CogView4Transformer2DModel,
|
||||
ConsisIDTransformer3DModel,
|
||||
ConsistencyDecoderVAE,
|
||||
ControlNetModel,
|
||||
@@ -632,12 +641,14 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
|
||||
Kandinsky3UNet,
|
||||
LatteTransformer3DModel,
|
||||
LTXVideoTransformer3DModel,
|
||||
Lumina2Transformer2DModel,
|
||||
LuminaNextDiT2DModel,
|
||||
MochiTransformer3DModel,
|
||||
ModelMixin,
|
||||
MotionAdapter,
|
||||
MultiAdapter,
|
||||
MultiControlNetModel,
|
||||
OmniGenTransformer2DModel,
|
||||
PixArtTransformer2DModel,
|
||||
PriorTransformer,
|
||||
SanaTransformer2DModel,
|
||||
@@ -778,6 +789,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
|
||||
CogVideoXPipeline,
|
||||
CogVideoXVideoToVideoPipeline,
|
||||
CogView3PlusPipeline,
|
||||
CogView4Pipeline,
|
||||
ConsisIDPipeline,
|
||||
CycleDiffusionPipeline,
|
||||
FluxControlImg2ImgPipeline,
|
||||
@@ -794,6 +806,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
|
||||
HunyuanDiTControlNetPipeline,
|
||||
HunyuanDiTPAGPipeline,
|
||||
HunyuanDiTPipeline,
|
||||
HunyuanSkyreelsImageToVideoPipeline,
|
||||
HunyuanVideoPipeline,
|
||||
I2VGenXLPipeline,
|
||||
IFImg2ImgPipeline,
|
||||
@@ -830,11 +843,14 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
|
||||
LEditsPPPipelineStableDiffusionXL,
|
||||
LTXImageToVideoPipeline,
|
||||
LTXPipeline,
|
||||
Lumina2Text2ImgPipeline,
|
||||
LuminaText2ImgPipeline,
|
||||
MarigoldDepthPipeline,
|
||||
MarigoldIntrinsicsPipeline,
|
||||
MarigoldNormalsPipeline,
|
||||
MochiPipeline,
|
||||
MusicLDMPipeline,
|
||||
OmniGenPipeline,
|
||||
PaintByExamplePipeline,
|
||||
PIAPipeline,
|
||||
PixArtAlphaPipeline,
|
||||
@@ -851,6 +867,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
|
||||
StableCascadeCombinedPipeline,
|
||||
StableCascadeDecoderPipeline,
|
||||
StableCascadePriorPipeline,
|
||||
StableDiffusion3ControlNetInpaintingPipeline,
|
||||
StableDiffusion3ControlNetPipeline,
|
||||
StableDiffusion3Img2ImgPipeline,
|
||||
StableDiffusion3InpaintPipeline,
|
||||
|
||||
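The hunks above extend the library's top-level import surface. As a quick smoke test of what they are meant to enable (assuming an environment with this branch installed), the newly listed names should resolve as plain `diffusers` imports:

```python
# Each of these names appears in the import hunks above.
from diffusers import (
    CogView4Pipeline,
    CogView4Transformer2DModel,
    Lumina2Text2ImgPipeline,
    OmniGenPipeline,
    OmniGenTransformer2DModel,
)

print(CogView4Pipeline, OmniGenPipeline)
```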
@@ -38,6 +38,7 @@ deps = {
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
+ "tiktoken": "tiktoken>=0.7.0",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.41.2",

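For reference, the table that this hunk touches maps pip package names to their pinned requirement specs; `setup.py` builds its install requirements and extras from the same strings, and the runtime dependency check reads them as well. A small illustrative lookup, assuming the module path `diffusers.dependency_versions_table` used upstream:

```python
from diffusers.dependency_versions_table import deps

# The hunk above adds the "tiktoken" pin next to the existing entries.
print(deps["tiktoken"])      # expected: "tiktoken>=0.7.0"
print(deps["transformers"])  # expected: "transformers>=4.41.2"
```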
@@ -2,6 +2,7 @@ from ..utils import is_torch_available


if is_torch_available():
+ from .group_offloading import apply_group_offloading
from .hooks import HookRegistry, ModelHook
from .layerwise_casting import apply_layerwise_casting, apply_layerwise_casting_hook
from .pyramid_attention_broadcast import PyramidAttentionBroadcastConfig, apply_pyramid_attention_broadcast

678 src/diffusers/hooks/group_offloading.py (new file)
@@ -0,0 +1,678 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from contextlib import nullcontext
|
||||
from typing import Dict, List, Optional, Set, Tuple
|
||||
|
||||
import torch
|
||||
|
||||
from ..utils import get_logger, is_accelerate_available
|
||||
from .hooks import HookRegistry, ModelHook
|
||||
|
||||
|
||||
if is_accelerate_available():
|
||||
from accelerate.hooks import AlignDevicesHook, CpuOffload
|
||||
from accelerate.utils import send_to_device
|
||||
|
||||
|
||||
logger = get_logger(__name__) # pylint: disable=invalid-name
|
||||
|
||||
|
||||
# fmt: off
|
||||
_GROUP_OFFLOADING = "group_offloading"
|
||||
_LAYER_EXECUTION_TRACKER = "layer_execution_tracker"
|
||||
_LAZY_PREFETCH_GROUP_OFFLOADING = "lazy_prefetch_group_offloading"
|
||||
|
||||
_SUPPORTED_PYTORCH_LAYERS = (
|
||||
torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d,
|
||||
torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d,
|
||||
torch.nn.Linear,
|
||||
# TODO(aryan): look into torch.nn.LayerNorm, torch.nn.GroupNorm later, seems to be causing some issues with CogVideoX
|
||||
# because of double invocation of the same norm layer in CogVideoXLayerNorm
|
||||
)
|
||||
# fmt: on
|
||||
|
||||
|
||||
class ModuleGroup:
|
||||
def __init__(
|
||||
self,
|
||||
modules: List[torch.nn.Module],
|
||||
offload_device: torch.device,
|
||||
onload_device: torch.device,
|
||||
offload_leader: torch.nn.Module,
|
||||
onload_leader: Optional[torch.nn.Module] = None,
|
||||
parameters: Optional[List[torch.nn.Parameter]] = None,
|
||||
buffers: Optional[List[torch.Tensor]] = None,
|
||||
non_blocking: bool = False,
|
||||
stream: Optional[torch.cuda.Stream] = None,
|
||||
cpu_param_dict: Optional[Dict[torch.nn.Parameter, torch.Tensor]] = None,
|
||||
onload_self: bool = True,
|
||||
) -> None:
|
||||
self.modules = modules
|
||||
self.offload_device = offload_device
|
||||
self.onload_device = onload_device
|
||||
self.offload_leader = offload_leader
|
||||
self.onload_leader = onload_leader
|
||||
self.parameters = parameters
|
||||
self.buffers = buffers
|
||||
self.non_blocking = non_blocking or stream is not None
|
||||
self.stream = stream
|
||||
self.cpu_param_dict = cpu_param_dict
|
||||
self.onload_self = onload_self
|
||||
|
||||
if self.stream is not None and self.cpu_param_dict is None:
|
||||
raise ValueError("cpu_param_dict must be provided when using stream for data transfer.")
|
||||
|
||||
def onload_(self):
|
||||
r"""Onloads the group of modules to the onload_device."""
|
||||
context = nullcontext() if self.stream is None else torch.cuda.stream(self.stream)
|
||||
if self.stream is not None:
|
||||
# Wait for previous Host->Device transfer to complete
|
||||
self.stream.synchronize()
|
||||
|
||||
with context:
|
||||
for group_module in self.modules:
|
||||
group_module.to(self.onload_device, non_blocking=self.non_blocking)
|
||||
if self.parameters is not None:
|
||||
for param in self.parameters:
|
||||
param.data = param.data.to(self.onload_device, non_blocking=self.non_blocking)
|
||||
if self.buffers is not None:
|
||||
for buffer in self.buffers:
|
||||
buffer.data = buffer.data.to(self.onload_device, non_blocking=self.non_blocking)
|
||||
|
||||
def offload_(self):
|
||||
r"""Offloads the group of modules to the offload_device."""
|
||||
if self.stream is not None:
|
||||
torch.cuda.current_stream().synchronize()
|
||||
for group_module in self.modules:
|
||||
for param in group_module.parameters():
|
||||
param.data = self.cpu_param_dict[param]
|
||||
else:
|
||||
for group_module in self.modules:
|
||||
group_module.to(self.offload_device, non_blocking=self.non_blocking)
|
||||
if self.parameters is not None:
|
||||
for param in self.parameters:
|
||||
param.data = param.data.to(self.offload_device, non_blocking=self.non_blocking)
|
||||
if self.buffers is not None:
|
||||
for buffer in self.buffers:
|
||||
buffer.data = buffer.data.to(self.offload_device, non_blocking=self.non_blocking)
|
||||
|
||||
|
||||
class GroupOffloadingHook(ModelHook):
|
||||
r"""
|
||||
A hook that offloads groups of torch.nn.Module to the CPU for storage and onloads to accelerator device for
|
||||
computation. Each group has one "onload leader" module that is responsible for onloading, and an "offload leader"
|
||||
module that is responsible for offloading. If prefetching is enabled, the onload leader of the previous module
|
||||
group is responsible for onloading the current module group.
|
||||
"""
|
||||
|
||||
_is_stateful = False
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
group: ModuleGroup,
|
||||
next_group: Optional[ModuleGroup] = None,
|
||||
) -> None:
|
||||
self.group = group
|
||||
self.next_group = next_group
|
||||
|
||||
def initialize_hook(self, module: torch.nn.Module) -> torch.nn.Module:
|
||||
if self.group.offload_leader == module:
|
||||
self.group.offload_()
|
||||
return module
|
||||
|
||||
def pre_forward(self, module: torch.nn.Module, *args, **kwargs):
|
||||
# If no onload_leader was assigned, we assume that the submodule whose forward method is called first
# is the onload_leader of the group.
|
||||
if self.group.onload_leader is None:
|
||||
self.group.onload_leader = module
|
||||
|
||||
# If the current module is the onload_leader of the group, we onload the group if it is supposed
|
||||
# to onload itself. In the case of using prefetching with streams, we onload the next group if
|
||||
# it is not supposed to onload itself.
|
||||
if self.group.onload_leader == module:
|
||||
if self.group.onload_self:
|
||||
self.group.onload_()
|
||||
if self.next_group is not None and not self.next_group.onload_self:
|
||||
self.next_group.onload_()
|
||||
|
||||
args = send_to_device(args, self.group.onload_device, non_blocking=self.group.non_blocking)
|
||||
kwargs = send_to_device(kwargs, self.group.onload_device, non_blocking=self.group.non_blocking)
|
||||
return args, kwargs
|
||||
|
||||
def post_forward(self, module: torch.nn.Module, output):
|
||||
if self.group.offload_leader == module:
|
||||
self.group.offload_()
|
||||
return output
|
||||
|
||||
|
||||
class LazyPrefetchGroupOffloadingHook(ModelHook):
|
||||
r"""
|
||||
A hook, used in conjunction with GroupOffloadingHook, that applies lazy prefetching to groups of torch.nn.Module.
|
||||
This hook is used to determine the order in which the layers are executed during the forward pass. Once the layer
|
||||
invocation order is known, assignments of the next_group attribute for prefetching can be made, which allows
|
||||
prefetching groups in the correct order.
|
||||
"""
|
||||
|
||||
_is_stateful = False
|
||||
|
||||
def __init__(self):
|
||||
self.execution_order: List[Tuple[str, torch.nn.Module]] = []
|
||||
self._layer_execution_tracker_module_names = set()
|
||||
|
||||
def initialize_hook(self, module):
|
||||
# To every submodule that contains a group offloading hook (at this point, no prefetching is enabled for any
|
||||
# of the groups), we add a layer execution tracker hook that will be used to determine the order in which the
|
||||
# layers are executed during the forward pass.
|
||||
for name, submodule in module.named_modules():
|
||||
if name == "" or not hasattr(submodule, "_diffusers_hook"):
|
||||
continue
|
||||
|
||||
registry = HookRegistry.check_if_exists_or_initialize(submodule)
|
||||
group_offloading_hook = registry.get_hook(_GROUP_OFFLOADING)
|
||||
|
||||
if group_offloading_hook is not None:
|
||||
|
||||
def make_execution_order_update_callback(current_name, current_submodule):
|
||||
def callback():
|
||||
logger.debug(f"Adding {current_name} to the execution order")
|
||||
self.execution_order.append((current_name, current_submodule))
|
||||
|
||||
return callback
|
||||
|
||||
layer_tracker_hook = LayerExecutionTrackerHook(make_execution_order_update_callback(name, submodule))
|
||||
registry.register_hook(layer_tracker_hook, _LAYER_EXECUTION_TRACKER)
|
||||
self._layer_execution_tracker_module_names.add(name)
|
||||
|
||||
return module
|
||||
|
||||
def post_forward(self, module, output):
|
||||
# At this point, for the current module's submodules, we know the execution order of the layers. We can now
|
||||
# remove the layer execution tracker hooks and apply prefetching by setting the next_group attribute for each
|
||||
# group offloading hook.
|
||||
num_executed = len(self.execution_order)
|
||||
execution_order_module_names = {name for name, _ in self.execution_order}
|
||||
|
||||
# It may be possible that some layers were not executed during the forward pass. This can happen if the layer
|
||||
# is not used in the forward pass, or if the layer is not executed due to some other reason. In such cases, we
|
||||
# may not be able to apply prefetching in the correct order, which can lead to device-mismatch related errors
|
||||
# if the missing layers end up being executed in the future.
|
||||
if execution_order_module_names != self._layer_execution_tracker_module_names:
|
||||
unexecuted_layers = list(self._layer_execution_tracker_module_names - execution_order_module_names)
|
||||
logger.warning(
|
||||
"It seems like some layers were not executed during the forward pass. This may lead to problems when "
|
||||
"applying lazy prefetching with automatic tracing and lead to device-mismatch related errors. Please "
|
||||
"make sure that all layers are executed during the forward pass. The following layers were not executed:\n"
|
||||
f"{unexecuted_layers=}"
|
||||
)
|
||||
|
||||
# Remove the layer execution tracker hooks from the submodules
|
||||
base_module_registry = module._diffusers_hook
|
||||
registries = [submodule._diffusers_hook for _, submodule in self.execution_order]
|
||||
|
||||
for i in range(num_executed):
|
||||
registries[i].remove_hook(_LAYER_EXECUTION_TRACKER, recurse=False)
|
||||
|
||||
# Remove the current lazy prefetch group offloading hook so that it doesn't interfere with the next forward pass
|
||||
base_module_registry.remove_hook(_LAZY_PREFETCH_GROUP_OFFLOADING, recurse=False)
|
||||
|
||||
# Apply lazy prefetching by setting required attributes
|
||||
group_offloading_hooks = [registry.get_hook(_GROUP_OFFLOADING) for registry in registries]
|
||||
if num_executed > 0:
|
||||
base_module_group_offloading_hook = base_module_registry.get_hook(_GROUP_OFFLOADING)
|
||||
base_module_group_offloading_hook.next_group = group_offloading_hooks[0].group
|
||||
base_module_group_offloading_hook.next_group.onload_self = False
|
||||
|
||||
for i in range(num_executed - 1):
|
||||
name1, _ = self.execution_order[i]
|
||||
name2, _ = self.execution_order[i + 1]
|
||||
logger.debug(f"Applying lazy prefetch group offloading from {name1} to {name2}")
|
||||
group_offloading_hooks[i].next_group = group_offloading_hooks[i + 1].group
|
||||
group_offloading_hooks[i].next_group.onload_self = False
|
||||
|
||||
return output
|
||||
|
||||
|
||||
class LayerExecutionTrackerHook(ModelHook):
|
||||
r"""
|
||||
A hook that tracks the order in which the layers are executed during the forward pass by calling back to the
|
||||
LazyPrefetchGroupOffloadingHook to update the execution order.
|
||||
"""
|
||||
|
||||
_is_stateful = False
|
||||
|
||||
def __init__(self, execution_order_update_callback):
|
||||
self.execution_order_update_callback = execution_order_update_callback
|
||||
|
||||
def pre_forward(self, module, *args, **kwargs):
|
||||
self.execution_order_update_callback()
|
||||
return args, kwargs
|
||||
|
||||
|
||||
def apply_group_offloading(
|
||||
module: torch.nn.Module,
|
||||
onload_device: torch.device,
|
||||
offload_device: torch.device = torch.device("cpu"),
|
||||
offload_type: str = "block_level",
|
||||
num_blocks_per_group: Optional[int] = None,
|
||||
non_blocking: bool = False,
|
||||
use_stream: bool = False,
|
||||
) -> None:
|
||||
r"""
|
||||
Applies group offloading to the internal layers of a torch.nn.Module. To understand what group offloading is, and
|
||||
where it is beneficial, we need to first provide some context on how other supported offloading methods work.
|
||||
|
||||
Typically, offloading is done at two levels:
|
||||
- Module-level: In Diffusers, this can be enabled using the `ModelMixin::enable_model_cpu_offload()` method. It
|
||||
works by offloading each component of a pipeline to the CPU for storage, and onloading to the accelerator device
|
||||
when needed for computation. This method is more memory-efficient than keeping all components on the accelerator,
|
||||
but the memory requirements are still quite high. For this method to work, one needs memory equivalent to the
size of the model in its runtime dtype plus the size of the largest intermediate activation tensors to be able to
complete the forward pass.
|
||||
- Leaf-level: In Diffusers, this can be enabled using the `ModelMixin::enable_sequential_cpu_offload()` method. It
|
||||
works by offloading the lowest leaf-level parameters of the computation graph to the CPU for storage, and
|
||||
onloading only the leafs to the accelerator device for computation. This uses the lowest amount of accelerator
|
||||
memory, but can be slower due to the excessive number of device synchronizations.
|
||||
|
||||
Group offloading is a middle ground between the two methods. It works by offloading groups of internal layers
(either `torch.nn.ModuleList` or `torch.nn.Sequential`). This method uses lower memory than module-level
offloading. It is also faster than leaf-level/sequential offloading because the number of device synchronizations
is reduced.
|
||||
|
||||
Another supported feature (for CUDA devices with support for asynchronous data transfer streams) is the ability to
overlap data transfer and computation to reduce the overall execution time compared to sequential offloading. This
is enabled using layer prefetching with streams: the layer that is to be executed next starts onloading to the
accelerator device while the current layer is being executed, which increases the memory requirements slightly.
Note that this implementation also supports leaf-level offloading, which can be made much faster when streams are
used.
|
||||
|
||||
Args:
|
||||
module (`torch.nn.Module`):
|
||||
The module to which group offloading is applied.
|
||||
onload_device (`torch.device`):
|
||||
The device to which the group of modules are onloaded.
|
||||
offload_device (`torch.device`, defaults to `torch.device("cpu")`):
|
||||
The device to which the group of modules are offloaded. This should typically be the CPU. Default is CPU.
|
||||
offload_type (`str`, defaults to "block_level"):
|
||||
The type of offloading to be applied. Can be one of "block_level" or "leaf_level". Default is
|
||||
"block_level".
|
||||
num_blocks_per_group (`int`, *optional*):
|
||||
The number of blocks per group when using offload_type="block_level". This is required when using
|
||||
offload_type="block_level".
|
||||
non_blocking (`bool`, defaults to `False`):
|
||||
If True, offloading and onloading is done with non-blocking data transfer.
|
||||
use_stream (`bool`, defaults to `False`):
|
||||
If True, offloading and onloading is done asynchronously using a CUDA stream. This can be useful for
|
||||
overlapping computation and data transfer.
|
||||
|
||||
Example:
|
||||
```python
|
||||
>>> from diffusers import CogVideoXTransformer3DModel
|
||||
>>> from diffusers.hooks import apply_group_offloading
|
||||
|
||||
>>> transformer = CogVideoXTransformer3DModel.from_pretrained(
|
||||
... "THUDM/CogVideoX-5b", subfolder="transformer", torch_dtype=torch.bfloat16
|
||||
... )
|
||||
|
||||
>>> apply_group_offloading(
|
||||
... transformer,
|
||||
... onload_device=torch.device("cuda"),
|
||||
... offload_device=torch.device("cpu"),
|
||||
... offload_type="block_level",
|
||||
... num_blocks_per_group=2,
|
||||
... use_stream=True,
|
||||
... )
|
||||
```
|
||||
"""
|
||||
|
||||
stream = None
|
||||
if use_stream:
|
||||
if torch.cuda.is_available():
|
||||
stream = torch.cuda.Stream()
|
||||
else:
|
||||
raise ValueError("Using streams for data transfer requires a CUDA device.")
|
||||
|
||||
_raise_error_if_accelerate_model_or_sequential_hook_present(module)
|
||||
|
||||
if offload_type == "block_level":
|
||||
if num_blocks_per_group is None:
|
||||
raise ValueError("num_blocks_per_group must be provided when using offload_type='block_level'.")
|
||||
|
||||
_apply_group_offloading_block_level(
|
||||
module, num_blocks_per_group, offload_device, onload_device, non_blocking, stream
|
||||
)
|
||||
elif offload_type == "leaf_level":
|
||||
_apply_group_offloading_leaf_level(module, offload_device, onload_device, non_blocking, stream)
|
||||
else:
|
||||
raise ValueError(f"Unsupported offload_type: {offload_type}")
|
||||
|
||||
|
||||
def _apply_group_offloading_block_level(
|
||||
module: torch.nn.Module,
|
||||
num_blocks_per_group: int,
|
||||
offload_device: torch.device,
|
||||
onload_device: torch.device,
|
||||
non_blocking: bool,
|
||||
stream: Optional[torch.cuda.Stream] = None,
|
||||
) -> None:
|
||||
r"""
|
||||
This function applies offloading to groups of torch.nn.ModuleList or torch.nn.Sequential blocks. In comparison to
|
||||
the "leaf_level" offloading, which is more fine-grained, this offloading is done at the top-level blocks.
|
||||
|
||||
Args:
|
||||
module (`torch.nn.Module`):
|
||||
The module to which group offloading is applied.
|
||||
offload_device (`torch.device`):
|
||||
The device to which the group of modules are offloaded. This should typically be the CPU.
|
||||
onload_device (`torch.device`):
|
||||
The device to which the group of modules are onloaded.
|
||||
non_blocking (`bool`):
|
||||
If True, offloading and onloading is done asynchronously. This can be useful for overlapping computation
|
||||
and data transfer.
|
||||
stream (`torch.cuda.Stream`, *optional*):
|
||||
If provided, offloading and onloading is done asynchronously using the provided stream. This can be useful
|
||||
for overlapping computation and data transfer.
|
||||
"""
|
||||
|
||||
# Create a pinned CPU parameter dict for async data transfer if streams are to be used
|
||||
cpu_param_dict = None
|
||||
if stream is not None:
|
||||
for param in module.parameters():
|
||||
param.data = param.data.cpu().pin_memory()
|
||||
cpu_param_dict = {param: param.data for param in module.parameters()}
|
||||
|
||||
# Create module groups for ModuleList and Sequential blocks
|
||||
modules_with_group_offloading = set()
|
||||
unmatched_modules = []
|
||||
matched_module_groups = []
|
||||
for name, submodule in module.named_children():
|
||||
if not isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)):
|
||||
unmatched_modules.append((name, submodule))
|
||||
modules_with_group_offloading.add(name)
|
||||
continue
|
||||
|
||||
for i in range(0, len(submodule), num_blocks_per_group):
|
||||
current_modules = submodule[i : i + num_blocks_per_group]
|
||||
group = ModuleGroup(
|
||||
modules=current_modules,
|
||||
offload_device=offload_device,
|
||||
onload_device=onload_device,
|
||||
offload_leader=current_modules[-1],
|
||||
onload_leader=current_modules[0],
|
||||
non_blocking=non_blocking,
|
||||
stream=stream,
|
||||
cpu_param_dict=cpu_param_dict,
|
||||
onload_self=stream is None,
|
||||
)
|
||||
matched_module_groups.append(group)
|
||||
for j in range(i, i + len(current_modules)):
|
||||
modules_with_group_offloading.add(f"{name}.{j}")
|
||||
|
||||
# Apply group offloading hooks to the module groups
|
||||
for i, group in enumerate(matched_module_groups):
|
||||
next_group = (
|
||||
matched_module_groups[i + 1] if i + 1 < len(matched_module_groups) and stream is not None else None
|
||||
)
|
||||
|
||||
for group_module in group.modules:
|
||||
_apply_group_offloading_hook(group_module, group, next_group)
|
||||
|
||||
# Parameters and Buffers of the top-level module need to be offloaded/onloaded separately
|
||||
# when the forward pass of this module is called. This is because the top-level module is not
|
||||
# part of any group (as doing so would lead to no VRAM savings).
|
||||
parameters = _gather_parameters_with_no_group_offloading_parent(module, modules_with_group_offloading)
|
||||
buffers = _gather_buffers_with_no_group_offloading_parent(module, modules_with_group_offloading)
|
||||
parameters = [param for _, param in parameters]
|
||||
buffers = [buffer for _, buffer in buffers]
|
||||
|
||||
# Create a group for the unmatched submodules of the top-level module so that they are on the correct
|
||||
# device when the forward pass is called.
|
||||
unmatched_modules = [unmatched_module for _, unmatched_module in unmatched_modules]
|
||||
unmatched_group = ModuleGroup(
|
||||
modules=unmatched_modules,
|
||||
offload_device=offload_device,
|
||||
onload_device=onload_device,
|
||||
offload_leader=module,
|
||||
onload_leader=module,
|
||||
parameters=parameters,
|
||||
buffers=buffers,
|
||||
non_blocking=False,
|
||||
stream=None,
|
||||
cpu_param_dict=None,
|
||||
onload_self=True,
|
||||
)
|
||||
next_group = matched_module_groups[0] if len(matched_module_groups) > 0 else None
|
||||
_apply_group_offloading_hook(module, unmatched_group, next_group)
|
||||
|
||||
|
||||
def _apply_group_offloading_leaf_level(
|
||||
module: torch.nn.Module,
|
||||
offload_device: torch.device,
|
||||
onload_device: torch.device,
|
||||
non_blocking: bool,
|
||||
stream: Optional[torch.cuda.Stream] = None,
|
||||
) -> None:
|
||||
r"""
|
||||
This function applies offloading to groups of leaf modules in a torch.nn.Module. This method has minimal memory
|
||||
requirements. However, it can be slower compared to other offloading methods due to the excessive number of device
|
||||
synchronizations. When using devices that support streams to overlap data transfer and computation, this method can
|
||||
reduce memory usage without any performance degradation.
|
||||
|
||||
Args:
|
||||
module (`torch.nn.Module`):
|
||||
The module to which group offloading is applied.
|
||||
offload_device (`torch.device`):
|
||||
The device to which the group of modules are offloaded. This should typically be the CPU.
|
||||
onload_device (`torch.device`):
|
||||
The device to which the group of modules are onloaded.
|
||||
non_blocking (`bool`):
|
||||
If True, offloading and onloading is done asynchronously. This can be useful for overlapping computation
|
||||
and data transfer.
|
||||
stream (`torch.cuda.Stream`, *optional*):
|
||||
If provided, offloading and onloading is done asynchronously using the provided stream. This can be useful
|
||||
for overlapping computation and data transfer.
|
||||
"""
|
||||
|
||||
# Create a pinned CPU parameter dict for async data transfer if streams are to be used
|
||||
cpu_param_dict = None
|
||||
if stream is not None:
|
||||
for param in module.parameters():
|
||||
param.data = param.data.cpu().pin_memory()
|
||||
cpu_param_dict = {param: param.data for param in module.parameters()}
|
||||
|
||||
# Create module groups for leaf modules and apply group offloading hooks
|
||||
modules_with_group_offloading = set()
|
||||
for name, submodule in module.named_modules():
|
||||
if not isinstance(submodule, _SUPPORTED_PYTORCH_LAYERS):
|
||||
continue
|
||||
group = ModuleGroup(
|
||||
modules=[submodule],
|
||||
offload_device=offload_device,
|
||||
onload_device=onload_device,
|
||||
offload_leader=submodule,
|
||||
onload_leader=submodule,
|
||||
non_blocking=non_blocking,
|
||||
stream=stream,
|
||||
cpu_param_dict=cpu_param_dict,
|
||||
onload_self=True,
|
||||
)
|
||||
_apply_group_offloading_hook(submodule, group, None)
|
||||
modules_with_group_offloading.add(name)
|
||||
|
||||
# Parameters and Buffers at all non-leaf levels need to be offloaded/onloaded separately when the forward pass
|
||||
# of the module is called
|
||||
module_dict = dict(module.named_modules())
|
||||
parameters = _gather_parameters_with_no_group_offloading_parent(module, modules_with_group_offloading)
|
||||
buffers = _gather_buffers_with_no_group_offloading_parent(module, modules_with_group_offloading)
|
||||
|
||||
# Find closest module parent for each parameter and buffer, and attach group hooks
|
||||
parent_to_parameters = {}
|
||||
for name, param in parameters:
|
||||
parent_name = _find_parent_module_in_module_dict(name, module_dict)
|
||||
if parent_name in parent_to_parameters:
|
||||
parent_to_parameters[parent_name].append(param)
|
||||
else:
|
||||
parent_to_parameters[parent_name] = [param]
|
||||
|
||||
parent_to_buffers = {}
|
||||
for name, buffer in buffers:
|
||||
parent_name = _find_parent_module_in_module_dict(name, module_dict)
|
||||
if parent_name in parent_to_buffers:
|
||||
parent_to_buffers[parent_name].append(buffer)
|
||||
else:
|
||||
parent_to_buffers[parent_name] = [buffer]
|
||||
|
||||
parent_names = set(parent_to_parameters.keys()) | set(parent_to_buffers.keys())
|
||||
for name in parent_names:
|
||||
parameters = parent_to_parameters.get(name, [])
|
||||
buffers = parent_to_buffers.get(name, [])
|
||||
parent_module = module_dict[name]
|
||||
assert getattr(parent_module, "_diffusers_hook", None) is None
|
||||
group = ModuleGroup(
|
||||
modules=[],
|
||||
offload_device=offload_device,
|
||||
onload_device=onload_device,
|
||||
offload_leader=parent_module,
|
||||
onload_leader=parent_module,
|
||||
parameters=parameters,
|
||||
buffers=buffers,
|
||||
non_blocking=non_blocking,
|
||||
stream=stream,
|
||||
cpu_param_dict=cpu_param_dict,
|
||||
onload_self=True,
|
||||
)
|
||||
_apply_group_offloading_hook(parent_module, group, None)
|
||||
|
||||
if stream is not None:
|
||||
# When using streams, we need to know the layer execution order for applying prefetching (to overlap data transfer
|
||||
# and computation). Since we don't know the order beforehand, we apply a lazy prefetching hook that will find the
|
||||
# execution order and apply prefetching in the correct order.
|
||||
unmatched_group = ModuleGroup(
|
||||
modules=[],
|
||||
offload_device=offload_device,
|
||||
onload_device=onload_device,
|
||||
offload_leader=module,
|
||||
onload_leader=module,
|
||||
parameters=None,
|
||||
buffers=None,
|
||||
non_blocking=False,
|
||||
stream=None,
|
||||
cpu_param_dict=None,
|
||||
onload_self=True,
|
||||
)
|
||||
_apply_lazy_group_offloading_hook(module, unmatched_group, None)
|
||||
|
||||
|
||||
def _apply_group_offloading_hook(
|
||||
module: torch.nn.Module,
|
||||
group: ModuleGroup,
|
||||
next_group: Optional[ModuleGroup] = None,
|
||||
) -> None:
|
||||
registry = HookRegistry.check_if_exists_or_initialize(module)
|
||||
|
||||
# We may have already registered a group offloading hook if the module had a torch.nn.Parameter whose parent
|
||||
# is the current module. In such cases, we don't want to overwrite the existing group offloading hook.
|
||||
if registry.get_hook(_GROUP_OFFLOADING) is None:
|
||||
hook = GroupOffloadingHook(group, next_group)
|
||||
registry.register_hook(hook, _GROUP_OFFLOADING)
|
||||
|
||||
|
||||
def _apply_lazy_group_offloading_hook(
|
||||
module: torch.nn.Module,
|
||||
group: ModuleGroup,
|
||||
next_group: Optional[ModuleGroup] = None,
|
||||
) -> None:
|
||||
registry = HookRegistry.check_if_exists_or_initialize(module)
|
||||
|
||||
# We may have already registered a group offloading hook if the module had a torch.nn.Parameter whose parent
|
||||
# is the current module. In such cases, we don't want to overwrite the existing group offloading hook.
|
||||
if registry.get_hook(_GROUP_OFFLOADING) is None:
|
||||
hook = GroupOffloadingHook(group, next_group)
|
||||
registry.register_hook(hook, _GROUP_OFFLOADING)
|
||||
|
||||
lazy_prefetch_hook = LazyPrefetchGroupOffloadingHook()
|
||||
registry.register_hook(lazy_prefetch_hook, _LAZY_PREFETCH_GROUP_OFFLOADING)
|
||||
|
||||
|
||||
def _gather_parameters_with_no_group_offloading_parent(
|
||||
module: torch.nn.Module, modules_with_group_offloading: Set[str]
|
||||
) -> List[torch.nn.Parameter]:
|
||||
parameters = []
|
||||
for name, parameter in module.named_parameters():
|
||||
has_parent_with_group_offloading = False
|
||||
atoms = name.split(".")
|
||||
while len(atoms) > 0:
|
||||
parent_name = ".".join(atoms)
|
||||
if parent_name in modules_with_group_offloading:
|
||||
has_parent_with_group_offloading = True
|
||||
break
|
||||
atoms.pop()
|
||||
if not has_parent_with_group_offloading:
|
||||
parameters.append((name, parameter))
|
||||
return parameters
|
||||
|
||||
|
||||
def _gather_buffers_with_no_group_offloading_parent(
|
||||
module: torch.nn.Module, modules_with_group_offloading: Set[str]
|
||||
) -> List[torch.Tensor]:
|
||||
buffers = []
|
||||
for name, buffer in module.named_buffers():
|
||||
has_parent_with_group_offloading = False
|
||||
atoms = name.split(".")
|
||||
while len(atoms) > 0:
|
||||
parent_name = ".".join(atoms)
|
||||
if parent_name in modules_with_group_offloading:
|
||||
has_parent_with_group_offloading = True
|
||||
break
|
||||
atoms.pop()
|
||||
if not has_parent_with_group_offloading:
|
||||
buffers.append((name, buffer))
|
||||
return buffers
|
||||
|
||||
|
||||
def _find_parent_module_in_module_dict(name: str, module_dict: Dict[str, torch.nn.Module]) -> str:
|
||||
atoms = name.split(".")
|
||||
while len(atoms) > 0:
|
||||
parent_name = ".".join(atoms)
|
||||
if parent_name in module_dict:
|
||||
return parent_name
|
||||
atoms.pop()
|
||||
return ""
|
||||
|
||||
|
||||
def _raise_error_if_accelerate_model_or_sequential_hook_present(module: torch.nn.Module) -> None:
|
||||
if not is_accelerate_available():
|
||||
return
|
||||
for name, submodule in module.named_modules():
|
||||
if not hasattr(submodule, "_hf_hook"):
|
||||
continue
|
||||
if isinstance(submodule._hf_hook, (AlignDevicesHook, CpuOffload)):
|
||||
raise ValueError(
|
||||
f"Cannot apply group offloading to a module that is already applying an alternative "
|
||||
f"offloading strategy from Accelerate. If you want to apply group offloading, please "
|
||||
f"disable the existing offloading strategy first. Offending module: {name} ({type(submodule)})"
|
||||
)
|
||||
|
||||
|
||||
def _is_group_offload_enabled(module: torch.nn.Module) -> bool:
|
||||
for submodule in module.modules():
|
||||
if hasattr(submodule, "_diffusers_hook") and submodule._diffusers_hook.get_hook(_GROUP_OFFLOADING) is not None:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _get_group_onload_device(module: torch.nn.Module) -> torch.device:
|
||||
for submodule in module.modules():
|
||||
if hasattr(submodule, "_diffusers_hook") and submodule._diffusers_hook.get_hook(_GROUP_OFFLOADING) is not None:
|
||||
return submodule._diffusers_hook.get_hook(_GROUP_OFFLOADING).group.onload_device
|
||||
raise ValueError("Group offloading is not enabled for the provided module.")
|
||||
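To complement the `block_level` example embedded in the docstring above, here is a minimal sketch of the `leaf_level` path with stream-based prefetching; it reuses the same CogVideoX checkpoint as the docstring and assumes a CUDA device is available:

```python
import torch

from diffusers import CogVideoXTransformer3DModel
from diffusers.hooks import apply_group_offloading

transformer = CogVideoXTransformer3DModel.from_pretrained(
    "THUDM/CogVideoX-5b", subfolder="transformer", torch_dtype=torch.bfloat16
)

# Leaf-level offloading keeps only the currently needed leaf layers on the accelerator.
# With use_stream=True, the lazy prefetching hook records the layer execution order on the
# first forward pass and then overlaps Host->Device transfers with computation.
apply_group_offloading(
    transformer,
    onload_device=torch.device("cuda"),
    offload_device=torch.device("cpu"),
    offload_type="leaf_level",
    use_stream=True,
)
```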
@@ -17,7 +17,7 @@ from typing import Optional, Tuple, Type, Union
|
||||
|
||||
import torch
|
||||
|
||||
from ..utils import get_logger
|
||||
from ..utils import get_logger, is_peft_available, is_peft_version
|
||||
from .hooks import HookRegistry, ModelHook
|
||||
|
||||
|
||||
@@ -25,6 +25,8 @@ logger = get_logger(__name__) # pylint: disable=invalid-name
|
||||
|
||||
|
||||
# fmt: off
|
||||
_LAYERWISE_CASTING_HOOK = "layerwise_casting"
|
||||
_PEFT_AUTOCAST_DISABLE_HOOK = "peft_autocast_disable"
|
||||
SUPPORTED_PYTORCH_LAYERS = (
|
||||
torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d,
|
||||
torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d,
|
||||
@@ -34,6 +36,11 @@ SUPPORTED_PYTORCH_LAYERS = (
|
||||
DEFAULT_SKIP_MODULES_PATTERN = ("pos_embed", "patch_embed", "norm", "^proj_in$", "^proj_out$")
|
||||
# fmt: on
|
||||
|
||||
_SHOULD_DISABLE_PEFT_INPUT_AUTOCAST = is_peft_available() and is_peft_version(">", "0.14.0")
|
||||
if _SHOULD_DISABLE_PEFT_INPUT_AUTOCAST:
|
||||
from peft.helpers import disable_input_dtype_casting
|
||||
from peft.tuners.tuners_utils import BaseTunerLayer
|
||||
|
||||
|
||||
class LayerwiseCastingHook(ModelHook):
|
||||
r"""
|
||||
@@ -70,6 +77,32 @@ class LayerwiseCastingHook(ModelHook):
|
||||
return output
|
||||
|
||||
|
||||
class PeftInputAutocastDisableHook(ModelHook):
|
||||
r"""
|
||||
A hook that disables the casting of inputs to the module weight dtype during the forward pass. By default, PEFT
|
||||
casts the inputs to the weight dtype of the module, which can lead to precision loss.
|
||||
|
||||
The reasons for needing this are:
|
||||
- If we don't add PEFT layers' weight names to `skip_modules_pattern` when applying layerwise casting, the
inputs will be cast to the (possibly lower-precision) storage dtype. Reference:
https://github.com/huggingface/peft/blob/0facdebf6208139cbd8f3586875acb378813dd97/src/peft/tuners/lora/layer.py#L706
|
||||
- We can, on our end, use something like accelerate's `send_to_device` but for dtypes. This way, we can ensure
|
||||
that the inputs are always correctly cast to the computation dtype. However, there are two goals we are
|
||||
hoping to achieve:
|
||||
1. Making forward implementations independent of device/dtype casting operations as much as possible.
|
||||
2. Performing inference without losing information from casting to different precisions. With the current
|
||||
PEFT implementation (as linked in the reference above), and assuming running layerwise casting inference
|
||||
with storage_dtype=torch.float8_e4m3fn and compute_dtype=torch.bfloat16, inputs are cast to
|
||||
torch.float8_e4m3fn in the lora layer. We will then upcast back to torch.bfloat16 when we continue the
|
||||
forward pass in PEFT linear forward or Diffusers layer forward, with a `send_to_dtype` operation from
|
||||
LayerwiseCastingHook. This will be a lossy operation and result in poorer generation quality.
|
||||
"""
|
||||
|
||||
def new_forward(self, module: torch.nn.Module, *args, **kwargs):
|
||||
with disable_input_dtype_casting(module):
|
||||
return self.fn_ref.original_forward(*args, **kwargs)
|
||||
|
||||
|
||||
def apply_layerwise_casting(
|
||||
module: torch.nn.Module,
|
||||
storage_dtype: torch.dtype,
|
||||
@@ -134,6 +167,7 @@ def apply_layerwise_casting(
|
||||
skip_modules_classes,
|
||||
non_blocking,
|
||||
)
|
||||
_disable_peft_input_autocast(module)
|
||||
|
||||
|
||||
def _apply_layerwise_casting(
|
||||
@@ -188,4 +222,24 @@ def apply_layerwise_casting_hook(
|
||||
"""
|
||||
registry = HookRegistry.check_if_exists_or_initialize(module)
|
||||
hook = LayerwiseCastingHook(storage_dtype, compute_dtype, non_blocking)
|
||||
registry.register_hook(hook, "layerwise_casting")
|
||||
registry.register_hook(hook, _LAYERWISE_CASTING_HOOK)
|
||||
|
||||
|
||||
def _is_layerwise_casting_active(module: torch.nn.Module) -> bool:
|
||||
for submodule in module.modules():
|
||||
if (
|
||||
hasattr(submodule, "_diffusers_hook")
|
||||
and submodule._diffusers_hook.get_hook(_LAYERWISE_CASTING_HOOK) is not None
|
||||
):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _disable_peft_input_autocast(module: torch.nn.Module) -> None:
|
||||
if not _SHOULD_DISABLE_PEFT_INPUT_AUTOCAST:
|
||||
return
|
||||
for submodule in module.modules():
|
||||
if isinstance(submodule, BaseTunerLayer) and _is_layerwise_casting_active(submodule):
|
||||
registry = HookRegistry.check_if_exists_or_initialize(submodule)
|
||||
hook = PeftInputAutocastDisableHook()
|
||||
registry.register_hook(hook, _PEFT_AUTOCAST_DISABLE_HOOK)
|
||||
|
||||
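A rough sketch of how the new PEFT handling is expected to interact with layerwise casting; the model checkpoint is a placeholder and the comments describe the intended behavior of the hooks added above, not a verified end-to-end run:

```python
import torch

from diffusers import FluxTransformer2DModel
from diffusers.hooks import apply_layerwise_casting

transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)

# Weights are stored in float8 and upcast to bfloat16 just-in-time for computation.
# If PEFT LoRA layers (BaseTunerLayer) are attached to this module and a recent PEFT
# (> 0.14.0) is installed, the _disable_peft_input_autocast step above registers
# PeftInputAutocastDisableHook on them so that PEFT does not cast the bfloat16 inputs
# down to the float8 storage dtype during its own forward pass.
apply_layerwise_casting(
    transformer,
    storage_dtype=torch.float8_e4m3fn,
    compute_dtype=torch.bfloat16,
)
```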
@@ -73,6 +73,7 @@ if is_torch_available():
|
||||
"Mochi1LoraLoaderMixin",
|
||||
"HunyuanVideoLoraLoaderMixin",
|
||||
"SanaLoraLoaderMixin",
|
||||
"Lumina2LoraLoaderMixin",
|
||||
]
|
||||
_import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
|
||||
_import_structure["ip_adapter"] = [
|
||||
@@ -105,6 +106,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
|
||||
HunyuanVideoLoraLoaderMixin,
|
||||
LoraLoaderMixin,
|
||||
LTXVideoLoraLoaderMixin,
|
||||
Lumina2LoraLoaderMixin,
|
||||
Mochi1LoraLoaderMixin,
|
||||
SanaLoraLoaderMixin,
|
||||
SD3LoraLoaderMixin,
|
||||
|
||||
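The new `Lumina2LoraLoaderMixin` follows the same pattern as the other per-model LoRA mixins, so the usual pipeline-level API should apply. A hedged sketch; the base checkpoint name is taken from the upstream Lumina 2 release and the LoRA repository is a placeholder:

```python
import torch

from diffusers import Lumina2Text2ImgPipeline

pipe = Lumina2Text2ImgPipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16
)

# load_lora_weights comes from the newly exported Lumina2LoraLoaderMixin.
pipe.load_lora_weights("some-user/lumina2-lora", adapter_name="style")  # placeholder repo
pipe.set_adapters(["style"], adapter_weights=[0.8])
```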
@@ -23,7 +23,9 @@ from safetensors import safe_open
|
||||
from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_state_dict
|
||||
from ..utils import (
|
||||
USE_PEFT_BACKEND,
|
||||
_get_detailed_type,
|
||||
_get_model_file,
|
||||
_is_valid_type,
|
||||
is_accelerate_available,
|
||||
is_torch_version,
|
||||
is_transformers_available,
|
||||
@@ -577,29 +579,36 @@ class FluxIPAdapterMixin:
|
||||
pipeline.set_ip_adapter_scale(ip_strengths)
|
||||
```
|
||||
"""
|
||||
transformer = self.transformer
|
||||
if not isinstance(scale, list):
|
||||
scale = [[scale] * transformer.config.num_layers]
|
||||
elif isinstance(scale, list) and isinstance(scale[0], int) or isinstance(scale[0], float):
|
||||
if len(scale) != transformer.config.num_layers:
|
||||
raise ValueError(f"Expected list of {transformer.config.num_layers} scales, got {len(scale)}.")
|
||||
|
||||
scale_type = Union[int, float]
|
||||
num_ip_adapters = self.transformer.encoder_hid_proj.num_ip_adapters
|
||||
num_layers = self.transformer.config.num_layers
|
||||
|
||||
# Single value for all layers of all IP-Adapters
|
||||
if isinstance(scale, scale_type):
|
||||
scale = [scale for _ in range(num_ip_adapters)]
|
||||
# List of per-layer scales for a single IP-Adapter
|
||||
elif _is_valid_type(scale, List[scale_type]) and num_ip_adapters == 1:
|
||||
scale = [scale]
|
||||
# Invalid scale type
|
||||
elif not _is_valid_type(scale, List[Union[scale_type, List[scale_type]]]):
|
||||
raise TypeError(f"Unexpected type {_get_detailed_type(scale)} for scale.")
|
||||
|
||||
scale_configs = scale
|
||||
if len(scale) != num_ip_adapters:
|
||||
raise ValueError(f"Cannot assign {len(scale)} scales to {num_ip_adapters} IP-Adapters.")
|
||||
|
||||
key_id = 0
|
||||
for attn_name, attn_processor in transformer.attn_processors.items():
|
||||
if isinstance(attn_processor, (FluxIPAdapterJointAttnProcessor2_0)):
|
||||
if len(scale_configs) != len(attn_processor.scale):
|
||||
raise ValueError(
|
||||
f"Cannot assign {len(scale_configs)} scale_configs to "
|
||||
f"{len(attn_processor.scale)} IP-Adapter."
|
||||
)
|
||||
elif len(scale_configs) == 1:
|
||||
scale_configs = scale_configs * len(attn_processor.scale)
|
||||
for i, scale_config in enumerate(scale_configs):
|
||||
attn_processor.scale[i] = scale_config[key_id]
|
||||
key_id += 1
|
||||
if any(len(s) != num_layers for s in scale if isinstance(s, list)):
|
||||
invalid_scale_sizes = {len(s) for s in scale if isinstance(s, list)} - {num_layers}
|
||||
raise ValueError(
|
||||
f"Expected list of {num_layers} scales, got {', '.join(str(x) for x in invalid_scale_sizes)}."
|
||||
)
|
||||
|
||||
# Scalars are transformed to lists with length num_layers
|
||||
scale_configs = [[s] * num_layers if isinstance(s, scale_type) else s for s in scale]
|
||||
|
||||
# Set scales. zip over scale_configs prevents going into single transformer layers
|
||||
for attn_processor, *scale in zip(self.transformer.attn_processors.values(), *scale_configs):
|
||||
attn_processor.scale = scale
|
||||
|
||||
def unload_ip_adapter(self):
|
||||
"""
|
||||
|
||||
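The reworked validation above accepts either a single scalar (broadcast to every layer of every loaded IP-Adapter) or a per-layer list whose length must equal `transformer.config.num_layers`. A sketch of both call styles; the checkpoint, adapter repository, and `load_ip_adapter` keyword names follow the upstream Flux IP-Adapter example and should be treated as assumptions:

```python
import torch

from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.load_ip_adapter(
    "XLabs-AI/flux-ip-adapter",
    weight_name="ip_adapter.safetensors",
    image_encoder_pretrained_model_name_or_path="openai/clip-vit-large-patch14",
)

# 1) A single scalar is applied to every layer of every loaded IP-Adapter.
pipe.set_ip_adapter_scale(0.6)

# 2) With a single IP-Adapter loaded, a flat list provides one scale per transformer layer;
#    its length must match transformer.config.num_layers or the new check raises a ValueError.
num_layers = pipe.transformer.config.num_layers
pipe.set_ip_adapter_scale([0.3] * (num_layers // 2) + [0.8] * (num_layers - num_layers // 2))
```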
Some files were not shown because too many files have changed in this diff.