Mirror of https://github.com/huggingface/diffusers.git (synced 2025-12-08 21:44:27 +08:00)

Compare commits: integratio...memory-opt (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 278b3b8825 |  |
|  | d7f369cbab |  |
@@ -1,38 +0,0 @@
name: "\U0001F31F Remote VAE"
description: Feedback for remote VAE pilot
labels: [ "Remote VAE" ]

body:
  - type: textarea
    id: positive
    validations:
      required: true
    attributes:
      label: Did you like the remote VAE solution?
      description: |
        If you liked it, we would appreciate it if you could elaborate on what you liked.

  - type: textarea
    id: feedback
    validations:
      required: true
    attributes:
      label: What can be improved about the current solution?
      description: |
        Let us know the things you would like to see improved. Note that we will work on optimizing the solution once the pilot is over and we have usage data.

  - type: textarea
    id: others
    validations:
      required: true
    attributes:
      label: What other VAEs would you like to see if the pilot goes well?
      description: |
        Provide a list of the VAEs you would like to see in the future if the pilot goes well.

  - type: textarea
    id: additional-info
    attributes:
      label: Notify the members of the team
      description: |
        Tag the following folks when submitting this feedback: @hlky @sayakpaul
6 .github/workflows/nightly_tests.yml (vendored)

@@ -265,7 +265,7 @@ jobs:
      - name: Run PyTorch CUDA tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
@@ -505,7 +505,7 @@ jobs:
  #       shell: arch -arch arm64 bash {0}
  #       env:
  #         HF_HOME: /System/Volumes/Data/mnt/cache
  #         HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
  #         HF_TOKEN: ${{ secrets.HF_TOKEN }}
  #       run: |
  #         ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps \
  #           --report-log=tests_torch_mps.log \
@@ -561,7 +561,7 @@ jobs:
  #       shell: arch -arch arm64 bash {0}
  #       env:
  #         HF_HOME: /System/Volumes/Data/mnt/cache
  #         HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
  #         HF_TOKEN: ${{ secrets.HF_TOKEN }}
  #       run: |
  #         ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps \
  #           --report-log=tests_torch_mps.log \
127 .github/workflows/pr_style_bot.yml (vendored)

@@ -1,127 +0,0 @@
name: PR Style Bot

on:
  issue_comment:
    types: [created]

permissions:
  contents: write
  pull-requests: write

jobs:
  run-style-bot:
    if: >
      contains(github.event.comment.body, '@bot /style') &&
      github.event.issue.pull_request != null
    runs-on: ubuntu-latest

    steps:
      - name: Extract PR details
        id: pr_info
        uses: actions/github-script@v6
        with:
          script: |
            const prNumber = context.payload.issue.number;
            const { data: pr } = await github.rest.pulls.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              pull_number: prNumber
            });

            // We capture both the branch ref and the "full_name" of the head repo
            // so that we can check out the correct repository & branch (including forks).
            core.setOutput("prNumber", prNumber);
            core.setOutput("headRef", pr.head.ref);
            core.setOutput("headRepoFullName", pr.head.repo.full_name);

      - name: Check out PR branch
        uses: actions/checkout@v3
        env:
          HEADREPOFULLNAME: ${{ steps.pr_info.outputs.headRepoFullName }}
          HEADREF: ${{ steps.pr_info.outputs.headRef }}
        with:
          # Instead of checking out the base repo, use the contributor's repo name
          repository: ${{ env.HEADREPOFULLNAME }}
          ref: ${{ env.HEADREF }}
          # You may need fetch-depth: 0 for being able to push
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Debug
        env:
          HEADREPOFULLNAME: ${{ steps.pr_info.outputs.headRepoFullName }}
          HEADREF: ${{ steps.pr_info.outputs.headRef }}
          PRNUMBER: ${{ steps.pr_info.outputs.prNumber }}
        run: |
          echo "PR number: ${{ env.PRNUMBER }}"
          echo "Head Ref: ${{ env.HEADREF }}"
          echo "Head Repo Full Name: ${{ env.HEADREPOFULLNAME }}"

      - name: Set up Python
        uses: actions/setup-python@v4

      - name: Install dependencies
        run: |
          pip install .[quality]

      - name: Download Makefile from main branch
        run: |
          curl -o main_Makefile https://raw.githubusercontent.com/huggingface/diffusers/main/Makefile

      - name: Compare Makefiles
        run: |
          if ! diff -q main_Makefile Makefile; then
            echo "Error: The Makefile has changed. Please ensure it matches the main branch."
            exit 1
          fi
          echo "No changes in Makefile. Proceeding..."
          rm -rf main_Makefile

      - name: Run make style and make quality
        run: |
          make style && make quality

      - name: Commit and push changes
        id: commit_and_push
        env:
          HEADREPOFULLNAME: ${{ steps.pr_info.outputs.headRepoFullName }}
          HEADREF: ${{ steps.pr_info.outputs.headRef }}
          PRNUMBER: ${{ steps.pr_info.outputs.prNumber }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          echo "HEADREPOFULLNAME: ${{ env.HEADREPOFULLNAME }}, HEADREF: ${{ env.HEADREF }}"
          # Configure git with the Actions bot user
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

          # Make sure your 'origin' remote is set to the contributor's fork
          git remote set-url origin "https://x-access-token:${GITHUB_TOKEN}@github.com/${{ env.HEADREPOFULLNAME }}.git"

          # If there are changes after running style/quality, commit them
          if [ -n "$(git status --porcelain)" ]; then
            git add .
            git commit -m "Apply style fixes"
            # Push to the original contributor's forked branch
            git push origin HEAD:${{ env.HEADREF }}
            echo "changes_pushed=true" >> $GITHUB_OUTPUT
          else
            echo "No changes to commit."
            echo "changes_pushed=false" >> $GITHUB_OUTPUT
          fi

      - name: Comment on PR with workflow run link
        if: steps.commit_and_push.outputs.changes_pushed == 'true'
        uses: actions/github-script@v6
        with:
          script: |
            const prNumber = parseInt(process.env.prNumber, 10);
            const runUrl = `${process.env.GITHUB_SERVER_URL}/${process.env.GITHUB_REPOSITORY}/actions/runs/${process.env.GITHUB_RUN_ID}`

            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber,
              body: `Style fixes have been applied. [View the workflow run here](${runUrl}).`
            });
        env:
          prNumber: ${{ steps.pr_info.outputs.prNumber }}
8 .github/workflows/pr_tests.yml (vendored)

@@ -2,8 +2,8 @@ name: Fast tests for PRs

on:
  pull_request:
    branches: [main]
    types: [synchronize]
    branches:
      - main
    paths:
      - "src/diffusers/**.py"
      - "benchmarks/**.py"
@@ -64,7 +64,6 @@ jobs:
        run: |
          python utils/check_copies.py
          python utils/check_dummies.py
          python utils/check_support_list.py
          make deps_table_check_updated
      - name: Check if failure
        if: ${{ failure() }}
@@ -121,8 +120,7 @@ jobs:
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
          python -m uv pip install accelerate

      - name: Environment
        run: |
23 .github/workflows/push_tests.yml (vendored)

@@ -1,13 +1,6 @@
name: Fast GPU Tests on main

on:
  pull_request:
    branches: main
    paths:
      - "src/diffusers/models/modeling_utils.py"
      - "src/diffusers/models/model_loading_utils.py"
      - "src/diffusers/pipelines/pipeline_utils.py"
      - "src/diffusers/pipeline_loading_utils.py"
  workflow_dispatch:
  push:
    branches:
@@ -167,7 +160,6 @@
          path: reports

  flax_tpu_tests:
    if: ${{ github.event_name != 'pull_request' }}
    name: Flax TPU Tests
    runs-on:
      group: gcp-ct5lp-hightpu-8t
@@ -195,7 +187,7 @@

      - name: Run Flax TPU tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python -m pytest -n 0 \
            -s -v -k "Flax" \
@@ -216,7 +208,6 @@
          path: reports

  onnx_cuda_tests:
    if: ${{ github.event_name != 'pull_request' }}
    name: ONNX CUDA Tests
    runs-on:
      group: aws-g4dn-2xlarge
@@ -244,7 +235,7 @@

      - name: Run ONNXRuntime CUDA tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "Onnx" \
@@ -265,7 +256,6 @@
          path: reports

  run_torch_compile_tests:
    if: ${{ github.event_name != 'pull_request' }}
    name: PyTorch Compile CUDA tests

    runs-on:
@@ -293,7 +283,7 @@
          python utils/print_env.py
      - name: Run example tests on GPU
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          RUN_COMPILE: yes
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
@@ -309,7 +299,6 @@
          path: reports

  run_xformers_tests:
    if: ${{ github.event_name != 'pull_request' }}
    name: PyTorch xformers CUDA tests

    runs-on:
@@ -337,7 +326,7 @@
          python utils/print_env.py
      - name: Run example tests on GPU
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
      - name: Failure short reports
@@ -360,6 +349,7 @@
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --gpus 0 --shm-size "16gb" --ipc host

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
@@ -369,6 +359,7 @@
      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
@@ -381,7 +372,7 @@

      - name: Run example tests on GPU
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install timm
16 .github/workflows/release_tests_fast.yml (vendored)

@@ -81,7 +81,7 @@ jobs:
          python utils/print_env.py
      - name: Slow PyTorch CUDA checkpoint tests on Ubuntu
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
@@ -135,7 +135,7 @@ jobs:

      - name: Run PyTorch CUDA tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
@@ -186,7 +186,7 @@ jobs:

      - name: Run PyTorch CUDA tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
@@ -241,7 +241,7 @@ jobs:

      - name: Run slow Flax TPU tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python -m pytest -n 0 \
            -s -v -k "Flax" \
@@ -289,7 +289,7 @@ jobs:

      - name: Run slow ONNXRuntime CUDA tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "Onnx" \
@@ -337,7 +337,7 @@ jobs:
          python utils/print_env.py
      - name: Run example tests on GPU
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          RUN_COMPILE: yes
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
@@ -380,7 +380,7 @@ jobs:
          python utils/print_env.py
      - name: Run example tests on GPU
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
      - name: Failure short reports
@@ -426,7 +426,7 @@ jobs:

      - name: Run example tests on GPU
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install timm
14 .github/workflows/run_tests_from_a_pr.yml (vendored)

@@ -7,8 +7,8 @@ on:
        default: 'diffusers/diffusers-pytorch-cuda'
        description: 'Name of the Docker image'
        required: true
      pr_number:
        description: 'PR number to test on'
      branch:
        description: 'PR Branch to test on'
        required: true
      test:
        description: 'Tests to run (e.g.: `tests/models`).'
@@ -43,8 +43,8 @@ jobs:
            exit 1
          fi

          if [[ ! "$PY_TEST" =~ ^tests/(models|pipelines|lora) ]]; then
            echo "Error: The input string must contain either 'models', 'pipelines', or 'lora' after 'tests/'."
          if [[ ! "$PY_TEST" =~ ^tests/(models|pipelines) ]]; then
            echo "Error: The input string must contain either 'models' or 'pipelines' after 'tests/'."
            exit 1
          fi
@@ -53,13 +53,13 @@
            exit 1
          fi
          echo "$PY_TEST"

        shell: bash -e {0}

      - name: Checkout PR branch
        uses: actions/checkout@v4
        with:
          ref: refs/pull/${{ inputs.pr_number }}/head
          ref: ${{ github.event.inputs.branch }}
          repository: ${{ github.event.pull_request.head.repo.full_name }}

      - name: Install pytest
        run: |
3 .github/workflows/trufflehog.yml (vendored)

@@ -13,6 +13,3 @@ jobs:
          fetch-depth: 0
      - name: Secret Scanning
        uses: trufflesecurity/trufflehog@main
        with:
          extra_args: --results=verified,unknown
@@ -89,8 +89,6 @@
    title: Kandinsky
  - local: using-diffusers/ip_adapter
    title: IP-Adapter
  - local: using-diffusers/omnigen
    title: OmniGen
  - local: using-diffusers/pag
    title: PAG
  - local: using-diffusers/controlnet
@@ -278,8 +276,6 @@
    title: ConsisIDTransformer3DModel
  - local: api/models/cogview3plus_transformer2d
    title: CogView3PlusTransformer2DModel
  - local: api/models/cogview4_transformer2d
    title: CogView4Transformer2DModel
  - local: api/models/dit_transformer2d
    title: DiTTransformer2DModel
  - local: api/models/flux_transformer
@@ -292,14 +288,10 @@
    title: LatteTransformer3DModel
  - local: api/models/lumina_nextdit2d
    title: LuminaNextDiT2DModel
  - local: api/models/lumina2_transformer2d
    title: Lumina2Transformer2DModel
  - local: api/models/ltx_video_transformer3d
    title: LTXVideoTransformer3DModel
  - local: api/models/mochi_transformer3d
    title: MochiTransformer3DModel
  - local: api/models/omnigen_transformer
    title: OmniGenTransformer2DModel
  - local: api/models/pixart_transformer2d
    title: PixArtTransformer2DModel
  - local: api/models/prior_transformer
@@ -384,8 +376,6 @@
    title: CogVideoX
  - local: api/pipelines/cogview3
    title: CogView3
  - local: api/pipelines/cogview4
    title: CogView4
  - local: api/pipelines/consisid
    title: ConsisID
  - local: api/pipelines/consistency_models
@@ -448,8 +438,6 @@
    title: LEDITS++
  - local: api/pipelines/ltx_video
    title: LTXVideo
  - local: api/pipelines/lumina2
    title: Lumina 2.0
  - local: api/pipelines/lumina
    title: Lumina-T2X
  - local: api/pipelines/marigold
@@ -460,8 +448,6 @@
    title: MultiDiffusion
  - local: api/pipelines/musicldm
    title: MusicLDM
  - local: api/pipelines/omnigen
    title: OmniGen
  - local: api/pipelines/pag
    title: PAG
  - local: api/pipelines/paint_by_example
@@ -612,8 +598,6 @@
    title: Attention Processor
  - local: api/activations
    title: Custom activation functions
  - local: api/cache
    title: Caching methods
  - local: api/normalization
    title: Custom normalization layers
  - local: api/utilities
@@ -25,16 +25,3 @@ Customized activation functions for supporting various models in 🤗 Diffusers.
## ApproximateGELU

[[autodoc]] models.activations.ApproximateGELU

## SwiGLU

[[autodoc]] models.activations.SwiGLU

## FP32SiLU

[[autodoc]] models.activations.FP32SiLU

## LinearActivation

[[autodoc]] models.activations.LinearActivation
@@ -147,20 +147,3 @@ An attention processor is a class for applying different types of attention mechanisms.
## XLAFlashAttnProcessor2_0

[[autodoc]] models.attention_processor.XLAFlashAttnProcessor2_0

## XFormersJointAttnProcessor

[[autodoc]] models.attention_processor.XFormersJointAttnProcessor

## IPAdapterXFormersAttnProcessor

[[autodoc]] models.attention_processor.IPAdapterXFormersAttnProcessor

## FluxIPAdapterJointAttnProcessor2_0

[[autodoc]] models.attention_processor.FluxIPAdapterJointAttnProcessor2_0

## XLAFluxFlashAttnProcessor2_0

[[autodoc]] models.attention_processor.XLAFluxFlashAttnProcessor2_0
@@ -1,49 +0,0 @@
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# Caching methods

## Pyramid Attention Broadcast

[Pyramid Attention Broadcast](https://huggingface.co/papers/2408.12588) from Xuanlei Zhao, Xiaolong Jin, Kai Wang, Yang You.

Pyramid Attention Broadcast (PAB) is a method that speeds up inference in diffusion models by systematically skipping attention computations between successive inference steps and reusing cached attention states. The attention states are not very different between successive inference steps. The most prominent difference is in the spatial attention blocks, not as much in the temporal attention blocks, and finally the least in the cross attention blocks. Therefore, many cross attention computation blocks can be skipped, followed by the temporal and spatial attention blocks. By combining other techniques like sequence parallelism and classifier-free guidance parallelism, PAB achieves near real-time video generation.

Enable PAB with [`~PyramidAttentionBroadcastConfig`] on any pipeline. For some benchmarks, refer to [this](https://github.com/huggingface/diffusers/pull/9562) pull request.

```python
import torch
from diffusers import CogVideoXPipeline, PyramidAttentionBroadcastConfig

pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Increasing the value of `spatial_attention_timestep_skip_range[0]` or decreasing the value of
# `spatial_attention_timestep_skip_range[1]` will decrease the interval in which pyramid attention
# broadcast is active, leading to slower inference speeds. However, large intervals can lead to
# poorer quality of generated videos.
config = PyramidAttentionBroadcastConfig(
    spatial_attention_block_skip_range=2,
    spatial_attention_timestep_skip_range=(100, 800),
    current_timestep_callback=lambda: pipe.current_timestep,
)
pipe.transformer.enable_cache(config)
```

### CacheMixin

[[autodoc]] CacheMixin

### PyramidAttentionBroadcastConfig

[[autodoc]] PyramidAttentionBroadcastConfig

[[autodoc]] apply_pyramid_attention_broadcast
@@ -20,10 +20,6 @@ LoRA is a fast and lightweight training method that inserts and trains a signifi
- [`FluxLoraLoaderMixin`] provides similar functions for [Flux](https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux).
- [`CogVideoXLoraLoaderMixin`] provides similar functions for [CogVideoX](https://huggingface.co/docs/diffusers/main/en/api/pipelines/cogvideox).
- [`Mochi1LoraLoaderMixin`] provides similar functions for [Mochi](https://huggingface.co/docs/diffusers/main/en/api/pipelines/mochi).
- [`LTXVideoLoraLoaderMixin`] provides similar functions for [LTX-Video](https://huggingface.co/docs/diffusers/main/en/api/pipelines/ltx_video).
- [`SanaLoraLoaderMixin`] provides similar functions for [Sana](https://huggingface.co/docs/diffusers/main/en/api/pipelines/sana).
- [`HunyuanVideoLoraLoaderMixin`] provides similar functions for [HunyuanVideo](https://huggingface.co/docs/diffusers/main/en/api/pipelines/hunyuan_video).
- [`Lumina2LoraLoaderMixin`] provides similar functions for [Lumina2](https://huggingface.co/docs/diffusers/main/en/api/pipelines/lumina2).
- [`AmusedLoraLoaderMixin`] is for the [`AmusedPipeline`].
- [`LoraBaseMixin`] provides a base class with several utility methods to fuse, unfuse, unload LoRAs, and more.

@@ -57,22 +53,6 @@ To learn more about how to load LoRA weights, see the [LoRA](../../using-diffuse

[[autodoc]] loaders.lora_pipeline.Mochi1LoraLoaderMixin

## LTXVideoLoraLoaderMixin

[[autodoc]] loaders.lora_pipeline.LTXVideoLoraLoaderMixin

## SanaLoraLoaderMixin

[[autodoc]] loaders.lora_pipeline.SanaLoraLoaderMixin

## HunyuanVideoLoraLoaderMixin

[[autodoc]] loaders.lora_pipeline.HunyuanVideoLoraLoaderMixin

## Lumina2LoraLoaderMixin

[[autodoc]] loaders.lora_pipeline.Lumina2LoraLoaderMixin

## AmusedLoraLoaderMixin

[[autodoc]] loaders.lora_pipeline.AmusedLoraLoaderMixin
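The loader mixins listed above all expose the same high-level entry points (`load_lora_weights`, the `fuse_lora`/`unfuse_lora`/`unload_lora_weights` utilities from `LoraBaseMixin`). A rough sketch of that shared API is shown below; the LoRA repository id is a placeholder for illustration, not a checkpoint referenced by this page.

```python
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to("cuda")

# load_lora_weights() comes from FluxLoraLoaderMixin; the repo id below is a placeholder.
pipe.load_lora_weights("your-username/your-flux-lora", adapter_name="my_lora")

# fuse_lora()/unfuse_lora()/unload_lora_weights() are the LoraBaseMixin utilities mentioned above.
pipe.fuse_lora(lora_scale=0.9)
image = pipe("a corgi astronaut, studio lighting", num_inference_steps=28).images[0]

pipe.unfuse_lora()
pipe.unload_lora_weights()
```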
@@ -1,30 +0,0 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# CogView4Transformer2DModel

A Diffusion Transformer model for 2D data from [CogView4]()

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import CogView4Transformer2DModel

transformer = CogView4Transformer2DModel.from_pretrained("THUDM/CogView4-6B", subfolder="transformer", torch_dtype=torch.bfloat16).to("cuda")
```

## CogView4Transformer2DModel

[[autodoc]] CogView4Transformer2DModel

## Transformer2DModelOutput

[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
@@ -1,30 +0,0 @@
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# Lumina2Transformer2DModel

A Diffusion Transformer model for 2D image data introduced in [Lumina Image 2.0](https://huggingface.co/Alpha-VLLM/Lumina-Image-2.0) by Alpha-VLLM.

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import Lumina2Transformer2DModel

transformer = Lumina2Transformer2DModel.from_pretrained("Alpha-VLLM/Lumina-Image-2.0", subfolder="transformer", torch_dtype=torch.bfloat16)
```

## Lumina2Transformer2DModel

[[autodoc]] Lumina2Transformer2DModel

## Transformer2DModelOutput

[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
@@ -1,30 +0,0 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# OmniGenTransformer2DModel

A Transformer model that accepts multimodal instructions to generate images for [OmniGen](https://github.com/VectorSpaceLab/OmniGen/).

The abstract from the paper is:

*The emergence of Large Language Models (LLMs) has unified language generation tasks and revolutionized human-machine interaction. However, in the realm of image generation, a unified model capable of handling various tasks within a single framework remains largely unexplored. In this work, we introduce OmniGen, a new diffusion model for unified image generation. OmniGen is characterized by the following features: 1) Unification: OmniGen not only demonstrates text-to-image generation capabilities but also inherently supports various downstream tasks, such as image editing, subject-driven generation, and visual conditional generation. 2) Simplicity: The architecture of OmniGen is highly simplified, eliminating the need for additional plugins. Moreover, compared to existing diffusion models, it is more user-friendly and can complete complex tasks end-to-end through instructions without the need for extra intermediate steps, greatly simplifying the image generation workflow. 3) Knowledge Transfer: Benefit from learning in a unified format, OmniGen effectively transfers knowledge across different tasks, manages unseen tasks and domains, and exhibits novel capabilities. We also explore the model’s reasoning capabilities and potential applications of the chain-of-thought mechanism. This work represents the first attempt at a general-purpose image generation model, and we will release our resources at https://github.com/VectorSpaceLab/OmniGen to foster future advancements.*

```python
import torch
from diffusers import OmniGenTransformer2DModel

transformer = OmniGenTransformer2DModel.from_pretrained("Shitao/OmniGen-v1-diffusers", subfolder="transformer", torch_dtype=torch.bfloat16)
```

## OmniGenTransformer2DModel

[[autodoc]] OmniGenTransformer2DModel
@@ -29,43 +29,3 @@ Customized normalization layers for supporting various models in 🤗 Diffusers.
## AdaGroupNorm

[[autodoc]] models.normalization.AdaGroupNorm

## AdaLayerNormContinuous

[[autodoc]] models.normalization.AdaLayerNormContinuous

## RMSNorm

[[autodoc]] models.normalization.RMSNorm

## GlobalResponseNorm

[[autodoc]] models.normalization.GlobalResponseNorm

## LuminaLayerNormContinuous
[[autodoc]] models.normalization.LuminaLayerNormContinuous

## SD35AdaLayerNormZeroX
[[autodoc]] models.normalization.SD35AdaLayerNormZeroX

## AdaLayerNormZeroSingle
[[autodoc]] models.normalization.AdaLayerNormZeroSingle

## LuminaRMSNormZero
[[autodoc]] models.normalization.LuminaRMSNormZero

## LpNorm
[[autodoc]] models.normalization.LpNorm

## CogView3PlusAdaLayerNormZeroTextImage
[[autodoc]] models.normalization.CogView3PlusAdaLayerNormZeroTextImage

## CogVideoXLayerNormZero
[[autodoc]] models.normalization.CogVideoXLayerNormZero

## MochiRMSNormZero
[[autodoc]] models.transformers.transformer_mochi.MochiRMSNormZero

## MochiRMSNorm
[[autodoc]] models.normalization.MochiRMSNorm
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.

# Text-to-Video Generation with AnimateDiff

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

## Overview

[AnimateDiff: Animate Your Personalized Text-to-Image Diffusion Models without Specific Tuning](https://arxiv.org/abs/2307.04725) by Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, Bo Dai.
@@ -15,10 +15,6 @@

# CogVideoX

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

[CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer](https://arxiv.org/abs/2408.06072) from Tsinghua University & ZhipuAI, by Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, Da Yin, Xiaotao Gu, Yuxuan Zhang, Weihan Wang, Yean Cheng, Ting Liu, Bin Xu, Yuxiao Dong, Jie Tang.

The abstract from the paper is:
@@ -1,34 +0,0 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->

# CogView4

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

This pipeline was contributed by [zRzRzRzRzRzRzR](https://github.com/zRzRzRzRzRzRzR). The original codebase can be found [here](https://huggingface.co/THUDM). The original weights can be found under [hf.co/THUDM](https://huggingface.co/THUDM).

## CogView4Pipeline

[[autodoc]] CogView4Pipeline
  - all
  - __call__

## CogView4PipelineOutput

[[autodoc]] pipelines.cogview4.pipeline_output.CogView4PipelineOutput
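The deleted page above documents the pipeline class without a usage snippet. A minimal text-to-image sketch for `CogView4Pipeline` might look as follows; the `THUDM/CogView4-6B` checkpoint matches the transformer docs elsewhere in this diff, while the prompt and generation arguments are ordinary illustrative values, not taken from this page.

```python
import torch
from diffusers import CogView4Pipeline

# Load the full pipeline in bfloat16, mirroring the transformer snippet above.
pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
pipe.to("cuda")

image = pipe(
    prompt="a red panda reading a book in a cozy library",
    guidance_scale=3.5,
    num_inference_steps=50,
).images[0]
image.save("cogview4.png")
```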
@@ -15,10 +15,6 @@

# ConsisID

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

[Identity-Preserving Text-to-Video Generation by Frequency Decomposition](https://arxiv.org/abs/2411.17440) from Peking University, University of Rochester, et al., by Shenghai Yuan, Jinfa Huang, Xianyi He, Yunyang Ge, Yujun Shi, Liuhan Chen, Jiebo Luo, Li Yuan.

The abstract from the paper is:
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.

# FluxControlInpaint

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

FluxControlInpaintPipeline is an implementation of Inpainting for Flux.1 Depth/Canny models. It is a pipeline that allows you to inpaint images using the Flux.1 Depth/Canny models. The pipeline takes an image and a mask as input and returns the inpainted image.

FLUX.1 Depth and Canny [dev] is a 12 billion parameter rectified flow transformer capable of generating an image based on a text description while following the structure of a given input image. **This is not a ControlNet model**.
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.

# ControlNet

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.

With a ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process.
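As a sketch of the workflow described above — conditioning Stable Diffusion on a prepared control image — using the commonly paired Canny checkpoints; the checkpoint ids and the edge-map path are assumptions for illustration, not values taken from this excerpt.

```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

# The control image must already be the conditioning signal (here, a Canny edge map).
control_image = load_image("canny_edges.png")  # placeholder path

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# The generated image follows the spatial structure encoded in the control image.
image = pipe("a futuristic city street at night", image=control_image).images[0]
```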
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.

# ControlNet with Flux.1

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

FluxControlNetPipeline is an implementation of ControlNet for Flux.1.

ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.

# ControlNet with Stable Diffusion 3

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

StableDiffusion3ControlNetPipeline is an implementation of ControlNet for Stable Diffusion 3.

ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.

# ControlNet with Stable Diffusion XL

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.

With a ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process.
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.

# ControlNetUnion

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

ControlNetUnionModel is an implementation of ControlNet for Stable Diffusion XL.

The ControlNet model was introduced in [ControlNetPlus](https://github.com/xinsir6/ControlNetPlus) by xinsir6. It supports multiple conditioning inputs without increasing computation.
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.

# ControlNet-XS

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

ControlNet-XS was introduced in [ControlNet-XS](https://vislearn.github.io/ControlNet-XS/) by Denis Zavadski and Carsten Rother. It is based on the observation that the control model in the [original ControlNet](https://huggingface.co/papers/2302.05543) can be made much smaller and still produce good results.

Like the original ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process.
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.

# DeepFloyd IF

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

## Overview

DeepFloyd IF is a novel state-of-the-art open-source text-to-image model with a high degree of photorealism and language understanding.
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.

# Flux

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

Flux is a series of text-to-image generation models based on diffusion transformers. To know more about Flux, check out the original [blog post](https://blackforestlabs.ai/announcing-black-forest-labs/) by the creators of Flux, Black Forest Labs.

Original model checkpoints for Flux can be found [here](https://huggingface.co/black-forest-labs). Original inference code can be found [here](https://github.com/black-forest-labs/flux).

@@ -313,53 +309,6 @@ image.save("output.png")

When unloading the Control LoRA weights, call `pipe.unload_lora_weights(reset_to_overwritten_params=True)` to reset the `pipe.transformer` completely back to its original form. The resultant pipeline can then be used with methods like [`DiffusionPipeline.from_pipe`]. More details about this argument are available in [this PR](https://github.com/huggingface/diffusers/pull/10397).

## IP-Adapter

<Tip>

Check out [IP-Adapter](../../../using-diffusers/ip_adapter) to learn more about how IP-Adapters work.

</Tip>

An IP-Adapter lets you prompt Flux with images, in addition to the text prompt. This is especially useful when describing complex concepts that are difficult to articulate through text alone and you have reference images.

```python
import torch
from diffusers import FluxPipeline
from diffusers.utils import load_image

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux_ip_adapter_input.jpg").resize((1024, 1024))

pipe.load_ip_adapter(
    "XLabs-AI/flux-ip-adapter",
    weight_name="ip_adapter.safetensors",
    image_encoder_pretrained_model_name_or_path="openai/clip-vit-large-patch14"
)
pipe.set_ip_adapter_scale(1.0)

image = pipe(
    width=1024,
    height=1024,
    prompt="wearing sunglasses",
    negative_prompt="",
    true_cfg=4.0,
    generator=torch.Generator().manual_seed(4444),
    ip_adapter_image=image,
).images[0]

image.save('flux_ip_adapter_output.jpg')
```

<div class="justify-center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux_ip_adapter_output.jpg"/>
  <figcaption class="mt-2 text-sm text-center text-gray-500">IP-Adapter examples with prompt "wearing sunglasses"</figcaption>
</div>

## Running FP16 inference

Flux can generate high-quality images with FP16 (i.e. to accelerate inference on Turing/Volta GPUs) but produces different outputs compared to FP32/BF16. The issue is that some activations in the text encoders have to be clipped when running in FP16, which affects the overall image. Forcing text encoders to run with FP32 inference thus removes this output difference. See [here](https://github.com/huggingface/diffusers/pull/9097#issuecomment-2272292516) for details.
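A rough sketch of that recommendation — loading the pipeline in FP16 while keeping both text encoders in FP32 — is shown below; treat the exact dtype split and generation arguments as the assumption being illustrated rather than an official recipe.

```python
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.float16)

# Keep the CLIP and T5 text encoders in FP32 so their activations are not clipped in FP16.
pipe.text_encoder.to(torch.float32)
pipe.text_encoder_2.to(torch.float32)
pipe.to("cuda")

image = pipe("a photo of a forest at dawn", num_inference_steps=28, guidance_scale=3.5).images[0]
```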
@@ -14,10 +14,6 @@

# HunyuanVideo

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

[HunyuanVideo](https://www.arxiv.org/abs/2412.03603) by Tencent.

*Recent advancements in video generation have significantly impacted daily life for both individuals and industries. However, the leading video generation models remain closed-source, resulting in a notable performance gap between industry capabilities and those available to the public. In this report, we introduce HunyuanVideo, an innovative open-source video foundation model that demonstrates performance in video generation comparable to, or even surpassing, that of leading closed-source models. HunyuanVideo encompasses a comprehensive framework that integrates several key elements, including data curation, advanced architectural design, progressive model scaling and training, and an efficient infrastructure tailored for large-scale model training and inference. As a result, we successfully trained a video generative model with over 13 billion parameters, making it the largest among all open-source models. We conducted extensive experiments and implemented a series of targeted designs to ensure high visual quality, motion dynamics, text-video alignment, and advanced filming techniques. According to evaluations by professionals, HunyuanVideo outperforms previous state-of-the-art models, including Runway Gen-3, Luma 1.6, and three top-performing Chinese video generative models. By releasing the code for the foundation model and its applications, we aim to bridge the gap between closed-source and open-source communities. This initiative will empower individuals within the community to experiment with their ideas, fostering a more dynamic and vibrant video generation ecosystem. The code is publicly available at [this https URL](https://github.com/tencent/HunyuanVideo).*

@@ -36,21 +32,6 @@ Recommendations for inference:
- For smaller resolution videos, try lower values of `shift` (between `2.0` to `5.0`) in the [Scheduler](https://huggingface.co/docs/diffusers/main/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler.shift). For larger resolution videos, try higher values (between `7.0` and `12.0`). The default value is `7.0` for HunyuanVideo. See the sketch after this list.
- For more information about supported resolutions and other details, please refer to the original repository [here](https://github.com/Tencent/HunyuanVideo/).
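A minimal sketch of adjusting `shift` for a smaller-resolution generation is shown below; the checkpoint id matches the model table that follows, while the resolution, frame count, dtype, and the `shift=3.0` value are illustrative assumptions.

```python
import torch
from diffusers import HunyuanVideoPipeline, FlowMatchEulerDiscreteScheduler
from diffusers.utils import export_to_video

pipe = HunyuanVideoPipeline.from_pretrained("hunyuanvideo-community/HunyuanVideo", torch_dtype=torch.bfloat16)

# Re-create the scheduler with a lower shift for smaller resolutions, per the recommendation above.
pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config, shift=3.0)
pipe.enable_model_cpu_offload()

video = pipe(prompt="a cat walks on the grass", height=320, width=512, num_frames=61).frames[0]
export_to_video(video, "output.mp4", fps=15)
```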
## Available models

The following models are available for the [`HunyuanVideoPipeline`](text-to-video) pipeline:

| Model name | Description |
|:---|:---|
| [`hunyuanvideo-community/HunyuanVideo`](https://huggingface.co/hunyuanvideo-community/HunyuanVideo) | Official HunyuanVideo (guidance-distilled). Performs best at multiple resolutions and frames. Performs best with `guidance_scale=6.0`, `true_cfg_scale=1.0` and without a negative prompt. |
| [`Skywork/SkyReels-V1-Hunyuan-T2V`](https://huggingface.co/Skywork/SkyReels-V1-Hunyuan-T2V) | Skywork's custom finetune of HunyuanVideo (de-distilled). Performs best with `97x544x960` resolution, `guidance_scale=1.0`, `true_cfg_scale=6.0` and a negative prompt. |

The following models are available for the image-to-video pipeline:

| Model name | Description |
|:---|:---|
| [`Skywork/SkyReels-V1-Hunyuan-I2V`](https://huggingface.co/Skywork/SkyReels-V1-Hunyuan-I2V) | Skywork's custom finetune of HunyuanVideo (de-distilled). Performs best at `97x544x960` resolution, `guidance_scale=1.0`, `true_cfg_scale=6.0` and a negative prompt. |

## Quantization

Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have varying impact on video quality depending on the video model.
@@ -9,10 +9,6 @@ specific language governing permissions and limitations under the License.

# Kandinsky 3

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

Kandinsky 3 is created by [Vladimir Arkhipkin](https://github.com/oriBetelgeuse), [Anastasia Maltseva](https://github.com/NastyaMittseva), [Igor Pavlov](https://github.com/boomb0om), [Andrei Filatov](https://github.com/anvilarth), [Arseniy Shakhmatov](https://github.com/cene555), [Andrey Kuznetsov](https://github.com/kuznetsoffandrey), [Denis Dimitrov](https://github.com/denndimitrov), and [Zein Shaheen](https://github.com/zeinsh).

The description from its GitHub page:
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.

# Kolors: Effective Training of Diffusion Model for Photorealistic Text-to-Image Synthesis

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>



Kolors is a large-scale text-to-image generation model based on latent diffusion, developed by [the Kuaishou Kolors team](https://github.com/Kwai-Kolors/Kolors). Trained on billions of text-image pairs, Kolors exhibits significant advantages over both open-source and closed-source models in visual quality, complex semantic accuracy, and text rendering for both Chinese and English characters. Furthermore, Kolors supports both Chinese and English inputs, demonstrating strong performance in understanding and generating Chinese-specific content. For more details, please refer to this [technical report](https://github.com/Kwai-Kolors/Kolors/blob/master/imgs/Kolors_paper.pdf).
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.

# Latent Consistency Models

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

Latent Consistency Models (LCMs) were proposed in [Latent Consistency Models: Synthesizing High-Resolution Images with Few-Step Inference](https://huggingface.co/papers/2310.04378) by Simian Luo, Yiqin Tan, Longbo Huang, Jian Li, and Hang Zhao.

The abstract of the paper is as follows:
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.

# LEDITS++

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

LEDITS++ was proposed in [LEDITS++: Limitless Image Editing using Text-to-Image Models](https://huggingface.co/papers/2311.16711) by Manuel Brack, Felix Friedrich, Katharina Kornmeier, Linoy Tsaban, Patrick Schramowski, Kristian Kersting, Apolinário Passos.

The abstract from the paper is:
@@ -14,10 +14,6 @@

# LTX Video

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

[LTX Video](https://huggingface.co/Lightricks/LTX-Video) is the first DiT-based video generation model capable of generating high-quality videos in real-time. It produces 24 FPS videos at a 768x512 resolution faster than they can be watched. Trained on a large-scale dataset of diverse videos, the model generates high-resolution videos with realistic and varied content. We provide a model for both text-to-video as well as image + text-to-video use cases.
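For reference, a minimal text-to-video sketch with `LTXPipeline`; only the `Lightricks/LTX-Video` checkpoint comes from the paragraph above, while the prompt, resolution, and frame count are illustrative assumptions.

```python
import torch
from diffusers import LTXPipeline
from diffusers.utils import export_to_video

pipe = LTXPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16).to("cuda")

video = pipe(
    prompt="a drone shot of waves crashing against rocky cliffs at sunset",
    width=768,
    height=512,
    num_frames=161,
).frames[0]
export_to_video(video, "ltx_video.mp4", fps=24)
```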
<Tip>
@@ -1,87 +0,0 @@
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. -->

# Lumina2

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

[Lumina Image 2.0: A Unified and Efficient Image Generative Model](https://huggingface.co/Alpha-VLLM/Lumina-Image-2.0) is a 2 billion parameter flow-based diffusion transformer capable of generating diverse images from text descriptions.

The abstract from the paper is:

*We introduce Lumina-Image 2.0, an advanced text-to-image model that surpasses previous state-of-the-art methods across multiple benchmarks, while also shedding light on its potential to evolve into a generalist vision intelligence model. Lumina-Image 2.0 exhibits three key properties: (1) Unification – it adopts a unified architecture that treats text and image tokens as a joint sequence, enabling natural cross-modal interactions and facilitating task expansion. Besides, since high-quality captioners can provide semantically better-aligned text-image training pairs, we introduce a unified captioning system, UniCaptioner, which generates comprehensive and precise captions for the model. This not only accelerates model convergence but also enhances prompt adherence, variable-length prompt handling, and task generalization via prompt templates. (2) Efficiency – to improve the efficiency of the unified architecture, we develop a set of optimization techniques that improve semantic learning and fine-grained texture generation during training while incorporating inference-time acceleration strategies without compromising image quality. (3) Transparency – we open-source all training details, code, and models to ensure full reproducibility, aiming to bridge the gap between well-resourced closed-source research teams and independent developers.*

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## Using Single File loading with Lumina Image 2.0

Single file loading for Lumina Image 2.0 is available for the `Lumina2Transformer2DModel`.

```python
import torch
from diffusers import Lumina2Transformer2DModel, Lumina2Text2ImgPipeline

ckpt_path = "https://huggingface.co/Alpha-VLLM/Lumina-Image-2.0/blob/main/consolidated.00-of-01.pth"
transformer = Lumina2Transformer2DModel.from_single_file(
    ckpt_path, torch_dtype=torch.bfloat16
)

pipe = Lumina2Text2ImgPipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0", transformer=transformer, torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()
image = pipe(
    "a cat holding a sign that says hello",
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("lumina-single-file.png")
```

## Using GGUF Quantized Checkpoints with Lumina Image 2.0

GGUF quantized checkpoints for the `Lumina2Transformer2DModel` can be loaded via `from_single_file` with the `GGUFQuantizationConfig`.

```python
import torch
from diffusers import Lumina2Transformer2DModel, Lumina2Text2ImgPipeline, GGUFQuantizationConfig

ckpt_path = "https://huggingface.co/calcuis/lumina-gguf/blob/main/lumina2-q4_0.gguf"
transformer = Lumina2Transformer2DModel.from_single_file(
    ckpt_path,
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)

pipe = Lumina2Text2ImgPipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0", transformer=transformer, torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()
image = pipe(
    "a cat holding a sign that says hello",
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("lumina-gguf.png")
```

## Lumina2Text2ImgPipeline

[[autodoc]] Lumina2Text2ImgPipeline
  - all
  - __call__
@@ -15,10 +15,6 @@
|
||||
|
||||
# Mochi 1 Preview
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
> [!TIP]
|
||||
> Only a research preview of the model weights is available at the moment.
|
||||
|
||||
|
||||
@@ -1,80 +0,0 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
-->
|
||||
|
||||
# OmniGen
|
||||
|
||||
[OmniGen: Unified Image Generation](https://arxiv.org/pdf/2409.11340) from BAAI, by Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Chaofan Li, Shuting Wang, Tiejun Huang, Zheng Liu.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*The emergence of Large Language Models (LLMs) has unified language generation tasks and revolutionized human-machine interaction. However, in the realm of image generation, a unified model capable of handling various tasks within a single framework remains largely unexplored. In this work, we introduce OmniGen, a new diffusion model for unified image generation. OmniGen is characterized by the following features: 1) Unification: OmniGen not only demonstrates text-to-image generation capabilities but also inherently supports various downstream tasks, such as image editing, subject-driven generation, and visual conditional generation. 2) Simplicity: The architecture of OmniGen is highly simplified, eliminating the need for additional plugins. Moreover, compared to existing diffusion models, it is more user-friendly and can complete complex tasks end-to-end through instructions without the need for extra intermediate steps, greatly simplifying the image generation workflow. 3) Knowledge Transfer: Benefit from learning in a unified format, OmniGen effectively transfers knowledge across different tasks, manages unseen tasks and domains, and exhibits novel capabilities. We also explore the model’s reasoning capabilities and potential applications of the chain-of-thought mechanism. This work represents the first attempt at a general-purpose image generation model, and we will release our resources at https://github.com/VectorSpaceLab/OmniGen to foster future advancements.*
|
||||
|
||||
<Tip>
|
||||
|
||||
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers.md) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading.md#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
|
||||
|
||||
</Tip>
|
||||
|
||||
This pipeline was contributed by [staoxiao](https://github.com/staoxiao). The original codebase can be found [here](https://github.com/VectorSpaceLab/OmniGen). The original weights can be found under [hf.co/shitao](https://huggingface.co/Shitao/OmniGen-v1).
|
||||
|
||||
## Inference
|
||||
|
||||
First, load the pipeline:
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1-diffusers", torch_dtype=torch.bfloat16)
|
||||
pipe.to("cuda")
|
||||
```
|
||||
|
||||
For text-to-image, pass a text prompt. By default, OmniGen generates a 1024x1024 image.
|
||||
You can try setting the `height` and `width` parameters to generate images with different sizes.
|
||||
|
||||
```python
|
||||
prompt = "Realistic photo. A young woman sits on a sofa, holding a book and facing the camera. She wears delicate silver hoop earrings adorned with tiny, sparkling diamonds that catch the light, with her long chestnut hair cascading over her shoulders. Her eyes are focused and gentle, framed by long, dark lashes. She is dressed in a cozy cream sweater, which complements her warm, inviting smile. Behind her, there is a table with a cup of water in a sleek, minimalist blue mug. The background is a serene indoor setting with soft natural light filtering through a window, adorned with tasteful art and flowers, creating a cozy and peaceful ambiance. 4K, HD."
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
height=1024,
|
||||
width=1024,
|
||||
guidance_scale=3,
|
||||
generator=torch.Generator(device="cpu").manual_seed(111),
|
||||
).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
OmniGen supports multimodal inputs.
|
||||
When the input includes an image, you need to add a placeholder `<img><|image_1|></img>` in the text prompt to represent the image.
|
||||
It is recommended to enable `use_input_image_size_as_output` to keep the edited image the same size as the original image.
|
||||
|
||||
```python
|
||||
prompt="<img><|image_1|></img> Remove the woman's earrings. Replace the mug with a clear glass filled with sparkling iced cola."
|
||||
input_images=[load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/t2i_woman_with_book.png")]
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
guidance_scale=2,
|
||||
img_guidance_scale=1.6,
|
||||
use_input_image_size_as_output=True,
|
||||
generator=torch.Generator(device="cpu").manual_seed(222)).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
## OmniGenPipeline
|
||||
|
||||
[[autodoc]] OmniGenPipeline
|
||||
- all
|
||||
- __call__
|
||||
@@ -54,7 +54,7 @@ The table below lists all the pipelines currently available in 🤗 Diffusers an
|
||||
| [DiT](dit) | text2image |
|
||||
| [Flux](flux) | text2image |
|
||||
| [Hunyuan-DiT](hunyuandit) | text2image |
|
||||
| [I2VGen-XL](i2vgenxl) | image2video |
|
||||
| [I2VGen-XL](i2vgenxl) | text2video |
|
||||
| [InstructPix2Pix](pix2pix) | image editing |
|
||||
| [Kandinsky 2.1](kandinsky) | text2image, image2image, inpainting, interpolation |
|
||||
| [Kandinsky 2.2](kandinsky_v22) | text2image, image2image, inpainting |
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Perturbed-Attention Guidance
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
[Perturbed-Attention Guidance (PAG)](https://ku-cvlab.github.io/Perturbed-Attention-Guidance/) is a new diffusion sampling guidance that improves sample quality across both unconditional and conditional settings, achieving this without requiring further training or the integration of external modules.
|
||||
|
||||
PAG was introduced in [Self-Rectifying Diffusion Sampling with Perturbed-Attention Guidance](https://huggingface.co/papers/2403.17377) by Donghoon Ahn, Hyoungwon Cho, Jaewon Min, Wooseok Jang, Jungwoo Kim, SeonHwa Kim, Hyun Hee Park, Kyong Hwan Jin and Seungryong Kim.
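Below is a minimal sketch of how PAG is typically enabled through [`AutoPipelineForText2Image`]; the SDXL checkpoint, `pag_applied_layers`, and `pag_scale` values are illustrative assumptions rather than recommendations from this page.

```python
import torch
from diffusers import AutoPipelineForText2Image

# Requesting enable_pag=True swaps in the PAG-enabled variant of the pipeline.
pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    enable_pag=True,
    pag_applied_layers=["mid"],  # which attention blocks are perturbed
    torch_dtype=torch.float16,
).to("cuda")

image = pipeline(
    "an insect robot preparing a delicious meal",
    pag_scale=3.0,  # strength of the perturbed-attention guidance
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("pag.png")
```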
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# MultiDiffusion
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
[MultiDiffusion: Fusing Diffusion Paths for Controlled Image Generation](https://huggingface.co/papers/2302.08113) is by Omer Bar-Tal, Lior Yariv, Yaron Lipman, and Tali Dekel.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Image-to-Video Generation with PIA (Personalized Image Animator)
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
## Overview
|
||||
|
||||
[PIA: Your Personalized Image Animator via Plug-and-Play Modules in Text-to-Image Models](https://arxiv.org/abs/2312.13964) by Yiming Zhang, Zhening Xing, Yanhong Zeng, Youqing Fang, Kai Chen
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# InstructPix2Pix
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
[InstructPix2Pix: Learning to Follow Image Editing Instructions](https://huggingface.co/papers/2211.09800) is by Tim Brooks, Aleksander Holynski and Alexei A. Efros.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -14,10 +14,6 @@
|
||||
|
||||
# SanaPipeline
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
[SANA: Efficient High-Resolution Image Synthesis with Linear Diffusion Transformers](https://huggingface.co/papers/2410.10629) from NVIDIA and MIT HAN Lab, by Enze Xie, Junsong Chen, Junyu Chen, Han Cai, Haotian Tang, Yujun Lin, Zhekai Zhang, Muyang Li, Ligeng Zhu, Yao Lu, Song Han.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Depth-to-image
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
The Stable Diffusion model can also infer depth based on an image using [MiDaS](https://github.com/isl-org/MiDaS). This allows you to pass a text prompt and an initial image to condition the generation of new images as well as a `depth_map` to preserve the image structure.
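As a rough sketch of that workflow (assuming the `stabilityai/stable-diffusion-2-depth` checkpoint and a publicly hosted example image), the depth pipeline can be driven like this:

```python
import torch
from diffusers import StableDiffusionDepth2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
)
# If no depth_map is passed, the pipeline estimates one from the image with MiDaS.
image = pipe(
    prompt="two tigers", image=init_image, negative_prompt="bad, deformed, ugly", strength=0.7
).images[0]
image.save("depth2img.png")
```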
|
||||
|
||||
<Tip>
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Image-to-image
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
The Stable Diffusion model can also be applied to image-to-image generation by passing a text prompt and an initial image to condition the generation of new images.
|
||||
|
||||
The [`StableDiffusionImg2ImgPipeline`] uses the diffusion-denoising mechanism proposed in [SDEdit: Guided Image Synthesis and Editing with Stochastic Differential Equations](https://huggingface.co/papers/2108.01073) by Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jiajun Wu, Jun-Yan Zhu, Stefano Ermon.
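A minimal sketch of that setup, reusing the `CompVis/stable-diffusion-v1-4` checkpoint mentioned elsewhere in these docs and an example sketch image (both are illustrative choices):

```python
import torch
from diffusers import StableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
)
image = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,  # lower values stay closer to the initial image
    guidance_scale=7.5,
).images[0]
image.save("img2img.png")
```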
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Inpainting
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
The Stable Diffusion model can also be applied to inpainting which lets you edit specific parts of an image by providing a mask and a text prompt using Stable Diffusion.
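A minimal sketch of that flow is shown below; the `stabilityai/stable-diffusion-2-inpainting` checkpoint and the example image/mask URLs are assumptions for illustration.

```python
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
)
mask_image = load_image(
    "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
)
# White pixels in the mask are repainted according to the prompt; black pixels are kept.
image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]
image.save("inpaint.png")
```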
|
||||
|
||||
## Tips
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Text-to-(RGB, depth)
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
LDM3D was proposed in [LDM3D: Latent Diffusion Model for 3D](https://huggingface.co/papers/2305.10853) by Gabriela Ben Melech Stan, Diana Wofk, Scottie Fox, Alex Redden, Will Saxton, Jean Yu, Estelle Aflalo, Shao-Yen Tseng, Fabio Nonato, Matthias Muller, and Vasudev Lal. Unlike existing text-to-image diffusion models such as [Stable Diffusion](./overview), which only generate an image, LDM3D generates both an image and a depth map from a given text prompt. With almost the same number of parameters, LDM3D creates a latent space that can compress both the RGB images and the depth maps.
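As a quick sketch of the joint RGB + depth output (assuming the `Intel/ldm3d-4c` checkpoint as an example):

```python
import torch
from diffusers import StableDiffusionLDM3DPipeline

pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c", torch_dtype=torch.float16).to("cuda")

output = pipe("A picture of some lemons on a table")
output.rgb[0].save("lemons_rgb.png")      # generated RGB image
output.depth[0].save("lemons_depth.png")  # jointly generated depth map
```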
|
||||
|
||||
Two checkpoints are available for use:
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Stable Diffusion pipelines
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
Stable Diffusion is a text-to-image latent diffusion model created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/) and [LAION](https://laion.ai/). Latent diffusion applies the diffusion process over a lower dimensional latent space to reduce memory and compute complexity. This specific type of diffusion model was proposed in [High-Resolution Image Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) by Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Björn Ommer.
|
||||
|
||||
Stable Diffusion is trained on 512x512 images from a subset of the LAION-5B dataset. This model uses a frozen CLIP ViT-L/14 text encoder to condition the model on text prompts. With its 860M UNet and 123M text encoder, the model is relatively lightweight and can run on consumer GPUs.
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Stable Diffusion 3
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
Stable Diffusion 3 (SD3) was proposed in [Scaling Rectified Flow Transformers for High-Resolution Image Synthesis](https://arxiv.org/pdf/2403.03206.pdf) by Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Muller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, Dustin Podell, Tim Dockhorn, Zion English, Kyle Lacey, Alex Goodwin, Yannik Marek, and Robin Rombach.
|
||||
|
||||
The abstract from the paper is:
|
||||
@@ -81,7 +77,7 @@ from diffusers import StableDiffusion3Pipeline
|
||||
from transformers import SiglipVisionModel, SiglipImageProcessor
|
||||
|
||||
image_encoder_id = "google/siglip-so400m-patch14-384"
|
||||
ip_adapter_id = "InstantX/SD3.5-Large-IP-Adapter"
|
||||
ip_adapter_id = "guiyrt/InstantX-SD3.5-Large-IP-Adapter-diffusers"
|
||||
|
||||
feature_extractor = SiglipImageProcessor.from_pretrained(
|
||||
image_encoder_id,
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Stable Diffusion XL
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
Stable Diffusion XL (SDXL) was proposed in [SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis](https://huggingface.co/papers/2307.01952) by Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Text-to-image
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
The Stable Diffusion model was created by researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), [Runway](https://github.com/runwayml), and [LAION](https://laion.ai/). The [`StableDiffusionPipeline`] is capable of generating photorealistic images given any text input. It's trained on 512x512 images from a subset of the LAION-5B dataset. This model uses a frozen CLIP ViT-L/14 text encoder to condition the model on text prompts. With its 860M UNet and 123M text encoder, the model is relatively lightweight and can run on consumer GPUs. Latent diffusion is the research on top of which Stable Diffusion was built. It was proposed in [High-Resolution Image Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) by Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Björn Ommer.
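In its simplest form (using the `CompVis/stable-diffusion-v1-4` checkpoint referenced elsewhere in these docs as an example), text-to-image generation looks like this:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

image = pipe(
    "a photograph of an astronaut riding a horse",
    generator=torch.Generator("cpu").manual_seed(0),  # seed for reproducibility
).images[0]
image.save("astronaut.png")
```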
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Super-resolution
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
The Stable Diffusion upscaler diffusion model was created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), and [LAION](https://laion.ai/). It is used to enhance the resolution of input images by a factor of 4.
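A minimal sketch of 4x upscaling, assuming the `stabilityai/stable-diffusion-x4-upscaler` checkpoint and an example low-resolution image:

```python
import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")

low_res_img = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png"
).resize((128, 128))

# The output resolution is 4x the input resolution (128x128 -> 512x512 here).
upscaled = pipe(prompt="a white cat", image=low_res_img).images[0]
upscaled.save("upsampled_cat.png")
```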
|
||||
|
||||
<Tip>
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Stable unCLIP
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
Stable unCLIP checkpoints are finetuned from [Stable Diffusion 2.1](./stable_diffusion/stable_diffusion_2) checkpoints to condition on CLIP image embeddings.
|
||||
Stable unCLIP still conditions on text embeddings. Given the two separate conditionings, stable unCLIP can be used
|
||||
for text guided image variation. When combined with an unCLIP prior, it can also be used for full text to image generation.
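A sketch of text-guided image variation with the `stabilityai/stable-diffusion-2-1-unclip` checkpoint (the checkpoint and image URL are illustrative assumptions):

```python
import torch
from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-unclip", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/tarsila_do_amaral.png"
)
# Without a prompt, generation is guided by the CLIP image embedding alone (image variation).
image = pipe(init_image).images[0]
image.save("variation.png")
```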
|
||||
|
||||
@@ -18,10 +18,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Text-to-video
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
[ModelScope Text-to-Video Technical Report](https://arxiv.org/abs/2308.06571) is by Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, Shiwei Zhang.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Text2Video-Zero
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
[Text2Video-Zero: Text-to-Image Diffusion Models are Zero-Shot Video Generators](https://huggingface.co/papers/2303.13439) is by Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, [Zhangyang Wang](https://www.ece.utexas.edu/people/faculty/atlas-wang), Shant Navasardyan, [Humphrey Shi](https://www.humphreyshi.com).
|
||||
|
||||
Text2Video-Zero enables zero-shot video generation using either:
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# UniDiffuser
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
The UniDiffuser model was proposed in [One Transformer Fits All Distributions in Multi-Modal Diffusion at Scale](https://huggingface.co/papers/2303.06555) by Fan Bao, Shen Nie, Kaiwen Xue, Chongxuan Li, Shi Pu, Yaole Wang, Gang Yue, Yue Cao, Hang Su, Jun Zhu.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
@@ -12,10 +12,6 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Würstchen
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
|
||||
</div>
|
||||
|
||||
<img src="https://github.com/dome272/Wuerstchen/assets/61938694/0617c863-165a-43ee-9303-2a17299a0cf9">
|
||||
|
||||
[Wuerstchen: An Efficient Architecture for Large-Scale Text-to-Image Diffusion Models](https://huggingface.co/papers/2306.00637) is by Pablo Pernias, Dominic Rampas, Mats L. Richter and Christopher Pal and Marc Aubreville.
|
||||
|
||||
@@ -41,11 +41,3 @@ Utility and helper functions for working with 🤗 Diffusers.
|
||||
## randn_tensor
|
||||
|
||||
[[autodoc]] utils.torch_utils.randn_tensor
|
||||
|
||||
## apply_layerwise_casting
|
||||
|
||||
[[autodoc]] hooks.layerwise_casting.apply_layerwise_casting
|
||||
|
||||
## apply_group_offloading
|
||||
|
||||
[[autodoc]] hooks.group_offloading.apply_group_offloading
|
||||
|
||||
@@ -23,60 +23,32 @@ You should install 🤗 Diffusers in a [virtual environment](https://docs.python
|
||||
If you're unfamiliar with Python virtual environments, take a look at this [guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
|
||||
A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies.
|
||||
|
||||
Create a virtual environment with Python or [uv](https://docs.astral.sh/uv/) (refer to [Installation](https://docs.astral.sh/uv/getting-started/installation/) for installation instructions), a fast Rust-based Python package and project manager.
|
||||
|
||||
<hfoptions id="install">
|
||||
<hfoption id="uv">
|
||||
Start by creating a virtual environment in your project directory:
|
||||
|
||||
```bash
|
||||
uv venv my-env
|
||||
source my-env/bin/activate
|
||||
python -m venv .env
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Python">
|
||||
Activate the virtual environment:
|
||||
|
||||
```bash
|
||||
python -m venv my-env
|
||||
source my-env/bin/activate
|
||||
source .env/bin/activate
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
You should also install 🤗 Transformers because 🤗 Diffusers relies on its models.
|
||||
You should also install 🤗 Transformers because 🤗 Diffusers relies on its models:
|
||||
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
PyTorch only supports Python 3.8 - 3.11 on Windows. Install Diffusers with uv.
|
||||
|
||||
```bash
|
||||
uv pip install diffusers["torch"] transformers
|
||||
```
|
||||
|
||||
You can also install Diffusers with pip.
|
||||
|
||||
Note - PyTorch only supports Python 3.8 - 3.11 on Windows.
|
||||
```bash
|
||||
pip install diffusers["torch"] transformers
|
||||
```
|
||||
|
||||
</pt>
|
||||
<jax>
|
||||
|
||||
Install Diffusers with uv.
|
||||
|
||||
```bash
|
||||
uv pip install diffusers["flax"] transformers
|
||||
```
|
||||
|
||||
You can also install Diffusers with pip.
|
||||
|
||||
```bash
|
||||
pip install diffusers["flax"] transformers
|
||||
```
|
||||
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
|
||||
|
||||
@@ -158,83 +158,6 @@ In order to properly offload models after they're called, it is required to run
|
||||
|
||||
</Tip>
|
||||
|
||||
## Group offloading
|
||||
|
||||
Group offloading is the middle ground between sequential and model offloading. It works by offloading groups of internal layers (either `torch.nn.ModuleList` or `torch.nn.Sequential`), which uses less memory than model-level offloading. It is also faster than sequential-level offloading because the number of device synchronizations is reduced.
|
||||
|
||||
To enable group offloading, call the [`~ModelMixin.enable_group_offload`] method on the model if it is a Diffusers model implementation. For any other model implementation, use [`~hooks.group_offloading.apply_group_offloading`]:
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import CogVideoXPipeline
|
||||
from diffusers.hooks import apply_group_offloading
|
||||
from diffusers.utils import export_to_video
|
||||
|
||||
# Load the pipeline
|
||||
onload_device = torch.device("cuda")
|
||||
offload_device = torch.device("cpu")
|
||||
pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
|
||||
|
||||
# We can utilize the enable_group_offload method for Diffusers model implementations
|
||||
pipe.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type="leaf_level", use_stream=True)
|
||||
|
||||
# For any other model implementations, the apply_group_offloading function can be used
|
||||
apply_group_offloading(pipe.text_encoder, onload_device=onload_device, offload_type="block_level", num_blocks_per_group=2)
|
||||
apply_group_offloading(pipe.vae, onload_device=onload_device, offload_type="leaf_level")
|
||||
|
||||
prompt = (
|
||||
"A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. "
|
||||
"The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "
|
||||
"pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "
|
||||
"casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. "
|
||||
"The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "
|
||||
"atmosphere of this unique musical performance."
|
||||
)
|
||||
video = pipe(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0]
|
||||
# This example used about 14.79 GB of peak memory. It can be reduced further by enabling tiling and using leaf_level offloading throughout the pipeline.
|
||||
print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
|
||||
export_to_video(video, "output.mp4", fps=8)
|
||||
```
|
||||
|
||||
Group offloading (for CUDA devices with support for asynchronous data transfer streams) overlaps data transfer and computation to reduce the overall execution time compared to sequential offloading. This is enabled using layer prefetching with CUDA streams. The next layer to be executed is loaded onto the accelerator device while the current layer is being executed - this increases the memory requirements slightly. Group offloading also supports leaf-level offloading (equivalent to sequential CPU offloading) but can be made much faster when using streams.
|
||||
|
||||
## FP8 layerwise weight-casting
|
||||
|
||||
PyTorch supports `torch.float8_e4m3fn` and `torch.float8_e5m2` as weight storage dtypes, but they can't be used for computation in many different tensor operations due to unimplemented kernel support. However, you can use these dtypes to store model weights in fp8 precision and upcast them on-the-fly when the layers are used in the forward pass. This is known as layerwise weight-casting.
|
||||
|
||||
Typically, inference on most models is done with `torch.float16` or `torch.bfloat16` weight/computation precision. Layerwise weight-casting cuts down the memory footprint of the model weights by approximately half.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import CogVideoXPipeline, CogVideoXTransformer3DModel
|
||||
from diffusers.utils import export_to_video
|
||||
|
||||
model_id = "THUDM/CogVideoX-5b"
|
||||
|
||||
# Load the model in bfloat16 and enable layerwise casting
|
||||
transformer = CogVideoXTransformer3DModel.from_pretrained(model_id, subfolder="transformer", torch_dtype=torch.bfloat16)
|
||||
transformer.enable_layerwise_casting(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16)
|
||||
|
||||
# Load the pipeline
|
||||
pipe = CogVideoXPipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=torch.bfloat16)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt = (
|
||||
"A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. "
|
||||
"The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "
|
||||
"pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "
|
||||
"casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. "
|
||||
"The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "
|
||||
"atmosphere of this unique musical performance."
|
||||
)
|
||||
video = pipe(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0]
|
||||
export_to_video(video, "output.mp4", fps=8)
|
||||
```
|
||||
|
||||
In the above example, layerwise casting is enabled on the transformer component of the pipeline. By default, certain layers are skipped from the FP8 weight casting because it can lead to significant degradation of generation quality. The normalization and modulation related weight parameters are also skipped by default.
|
||||
|
||||
However, you gain more control and flexibility by directly utilizing the [`~hooks.layerwise_casting.apply_layerwise_casting`] function instead of [`~ModelMixin.enable_layerwise_casting`].
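For example, here is a sketch of calling [`~hooks.layerwise_casting.apply_layerwise_casting`] directly on the transformer used above; the `skip_modules_pattern` values are illustrative assumptions about which layers to keep in the compute dtype.

```python
import torch
from diffusers import CogVideoXTransformer3DModel
from diffusers.hooks import apply_layerwise_casting

transformer = CogVideoXTransformer3DModel.from_pretrained(
    "THUDM/CogVideoX-5b", subfolder="transformer", torch_dtype=torch.bfloat16
)

apply_layerwise_casting(
    transformer,
    storage_dtype=torch.float8_e4m3fn,  # weights are stored in fp8
    compute_dtype=torch.bfloat16,       # and upcast to bf16 during the forward pass
    skip_modules_pattern=["patch_embed", "norm"],  # keep sensitive layers in bf16
)
```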
|
||||
|
||||
## Channels-last memory format
|
||||
|
||||
The channels-last memory format is an alternative way of ordering NCHW tensors in memory to preserve dimension ordering. Channels-last tensors are ordered in such a way that the channels become the densest dimension (storing images pixel-per-pixel). Since not all operators currently support the channels-last format, it may result in worse performance, but you should still try it and see if it works for your model.
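For example, assuming a Stable Diffusion pipeline is already loaded as `pipe`, the UNet can be switched to channels-last in place:

```python
print(pipe.unet.conv_out.state_dict()["weight"].stride())  # (2880, 9, 3, 1) - default contiguous NCHW layout

pipe.unet.to(memory_format=torch.channels_last)  # in-place operation

# The second (channel) dimension now has stride 1, i.e. the tensor is channels-last.
print(pipe.unet.conv_out.state_dict()["weight"].stride())
```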
|
||||
|
||||
@@ -29,7 +29,7 @@ However, it is hard to decide when to reuse the cache to ensure quality generate
|
||||
This achieves a 2x speedup on FLUX.1-dev and HunyuanVideo inference with very good quality.
|
||||
|
||||
<figure>
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/ada-cache.png" alt="Cache in Diffusion Transformer" />
|
||||
<img src="https://huggingface.co/datasets/chengzeyi/documentation-images/resolve/main/diffusers/para-attn/ada-cache.png" alt="Cache in Diffusion Transformer" />
|
||||
<figcaption>How AdaCache works, First Block Cache is a variant of it</figcaption>
|
||||
</figure>
|
||||
|
||||
|
||||
@@ -339,10 +339,7 @@ import torch
|
||||
from huggingface_hub.repocard import RepoCard
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
pipeline = DiffusionPipeline.from_pretrained(
|
||||
"CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16,
|
||||
).to("cuda")
|
||||
model_id = "sayakpaul/custom-diffusion-cat-wooden-pot"
|
||||
pipeline = DiffusionPipeline.from_pretrained("sayakpaul/custom-diffusion-cat-wooden-pot", torch_dtype=torch.float16).to("cuda")
|
||||
pipeline.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
|
||||
pipeline.load_textual_inversion(model_id, weight_name="<new1>.bin")
|
||||
pipeline.load_textual_inversion(model_id, weight_name="<new2>.bin")
|
||||
|
||||
@@ -221,7 +221,3 @@ pipe.delete_adapters("toy")
|
||||
pipe.get_active_adapters()
|
||||
["pixel"]
|
||||
```
|
||||
|
||||
## PeftInputAutocastDisableHook
|
||||
|
||||
[[autodoc]] hooks.layerwise_casting.PeftInputAutocastDisableHook
|
||||
|
||||
@@ -461,12 +461,12 @@ Chain it to an upscaler pipeline to increase the image resolution:
|
||||
from diffusers import StableDiffusionLatentUpscalePipeline
|
||||
|
||||
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
|
||||
"stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True
|
||||
"stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
|
||||
)
|
||||
upscaler.enable_model_cpu_offload()
|
||||
upscaler.enable_xformers_memory_efficient_attention()
|
||||
|
||||
image_2 = upscaler(prompt, image=image_1).images[0]
|
||||
image_2 = upscaler(prompt, image=image_1, output_type="latent").images[0]
|
||||
```
|
||||
|
||||
Finally, chain it to a super-resolution pipeline to further enhance the resolution:
|
||||
|
||||
@@ -1,317 +0,0 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
# OmniGen
|
||||
|
||||
OmniGen is an image generation model. Unlike existing text-to-image models, OmniGen is a single model designed to handle a variety of tasks (e.g., text-to-image, image editing, controllable generation). It has the following features:
|
||||
- Minimalist model architecture, consisting of only a VAE and a transformer module, for joint modeling of text and images.
|
||||
- Support for multimodal inputs. It can process any text-image mixed data as instructions for image generation, rather than relying solely on text.
|
||||
|
||||
For more information, please refer to the [paper](https://arxiv.org/pdf/2409.11340).
|
||||
This guide will walk you through using OmniGen for various tasks and use cases.
|
||||
|
||||
## Load model checkpoints
|
||||
|
||||
Model weights may be stored in separate subfolders on the Hub or locally, in which case, you should use the [`~DiffusionPipeline.from_pretrained`] method.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1-diffusers", torch_dtype=torch.bfloat16)
|
||||
```
|
||||
|
||||
## Text-to-image
|
||||
|
||||
For text-to-image, pass a text prompt. By default, OmniGen generates a 1024x1024 image.
|
||||
You can try setting the `height` and `width` parameters to generate images with different sizes.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained(
|
||||
"Shitao/OmniGen-v1-diffusers",
|
||||
torch_dtype=torch.bfloat16
|
||||
)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt = "Realistic photo. A young woman sits on a sofa, holding a book and facing the camera. She wears delicate silver hoop earrings adorned with tiny, sparkling diamonds that catch the light, with her long chestnut hair cascading over her shoulders. Her eyes are focused and gentle, framed by long, dark lashes. She is dressed in a cozy cream sweater, which complements her warm, inviting smile. Behind her, there is a table with a cup of water in a sleek, minimalist blue mug. The background is a serene indoor setting with soft natural light filtering through a window, adorned with tasteful art and flowers, creating a cozy and peaceful ambiance. 4K, HD."
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
height=1024,
|
||||
width=1024,
|
||||
guidance_scale=3,
|
||||
generator=torch.Generator(device="cpu").manual_seed(111),
|
||||
).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/t2i_woman_with_book.png" alt="generated image"/>
|
||||
</div>
|
||||
|
||||
## Image edit
|
||||
|
||||
OmniGen supports multimodal inputs.
|
||||
When the input includes an image, you need to add a placeholder `<img><|image_1|></img>` in the text prompt to represent the image.
|
||||
It is recommended to enable `use_input_image_size_as_output` to keep the edited image the same size as the original image.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
from diffusers.utils import load_image
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained(
|
||||
"Shitao/OmniGen-v1-diffusers",
|
||||
torch_dtype=torch.bfloat16
|
||||
)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt="<img><|image_1|></img> Remove the woman's earrings. Replace the mug with a clear glass filled with sparkling iced cola."
|
||||
input_images=[load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/t2i_woman_with_book.png")]
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
guidance_scale=2,
|
||||
img_guidance_scale=1.6,
|
||||
use_input_image_size_as_output=True,
|
||||
generator=torch.Generator(device="cpu").manual_seed(222)
|
||||
).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
<div class="flex flex-row gap-4">
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/t2i_woman_with_book.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/edit.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">edited image</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
OmniGen has some interesting features, such as visual reasoning, as shown in the example below.
|
||||
|
||||
```python
|
||||
prompt="If the woman is thirsty, what should she take? Find it in the image and highlight it in blue. <img><|image_1|></img>"
|
||||
input_images=[load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/edit.png")]
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
guidance_scale=2,
|
||||
img_guidance_scale=1.6,
|
||||
use_input_image_size_as_output=True,
|
||||
generator=torch.Generator(device="cpu").manual_seed(0)
|
||||
).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/reasoning.png" alt="generated image"/>
|
||||
</div>
|
||||
|
||||
## Controllable generation
|
||||
|
||||
OmniGen can handle several classic computer vision tasks. As shown below, OmniGen can detect human skeletons in input images, which can be used as control conditions to generate new images.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
from diffusers.utils import load_image
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained(
|
||||
"Shitao/OmniGen-v1-diffusers",
|
||||
torch_dtype=torch.bfloat16
|
||||
)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt="Detect the skeleton of human in this image: <img><|image_1|></img>"
|
||||
input_images=[load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/edit.png")]
|
||||
image1 = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
guidance_scale=2,
|
||||
img_guidance_scale=1.6,
|
||||
use_input_image_size_as_output=True,
|
||||
generator=torch.Generator(device="cpu").manual_seed(333)
|
||||
).images[0]
|
||||
image1.save("image1.png")
|
||||
|
||||
prompt="Generate a new photo using the following picture and text as conditions: <img><|image_1|></img>\n A young boy is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him."
|
||||
input_images=[load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/skeletal.png")]
|
||||
image2 = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
guidance_scale=2,
|
||||
img_guidance_scale=1.6,
|
||||
use_input_image_size_as_output=True,
|
||||
generator=torch.Generator(device="cpu").manual_seed(333)
|
||||
).images[0]
|
||||
image2.save("image2.png")
|
||||
```
|
||||
|
||||
<div class="flex flex-row gap-4">
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/edit.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/skeletal.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">detected skeleton</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/skeletal2img.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">skeleton to image</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
OmniGen can also directly use relevant information from input images to generate new images.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
from diffusers.utils import load_image
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained(
|
||||
"Shitao/OmniGen-v1-diffusers",
|
||||
torch_dtype=torch.bfloat16
|
||||
)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt="Following the pose of this image <img><|image_1|></img>, generate a new photo: A young boy is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him."
|
||||
input_images=[load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/edit.png")]
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
guidance_scale=2,
|
||||
img_guidance_scale=1.6,
|
||||
use_input_image_size_as_output=True,
|
||||
generator=torch.Generator(device="cpu").manual_seed(0)
|
||||
).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
<div class="flex flex-row gap-4">
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/same_pose.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
## ID and object preserving
|
||||
|
||||
OmniGen can generate multiple images based on the people and objects in the input image and supports inputting multiple images simultaneously.
|
||||
Additionally, OmniGen can extract desired objects from an image containing multiple objects based on instructions.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
from diffusers.utils import load_image
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained(
|
||||
"Shitao/OmniGen-v1-diffusers",
|
||||
torch_dtype=torch.bfloat16
|
||||
)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt="A man and a woman are sitting at a classroom desk. The man is the man with yellow hair in <img><|image_1|></img>. The woman is the woman on the left of <img><|image_2|></img>"
|
||||
input_image_1 = load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/3.png")
|
||||
input_image_2 = load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/4.png")
|
||||
input_images=[input_image_1, input_image_2]
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
height=1024,
|
||||
width=1024,
|
||||
guidance_scale=2.5,
|
||||
img_guidance_scale=1.6,
|
||||
generator=torch.Generator(device="cpu").manual_seed(666)
|
||||
).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
<div class="flex flex-row gap-4">
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/3.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">input_image_1</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/4.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">input_image_2</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/id2.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
```py
|
||||
import torch
|
||||
from diffusers import OmniGenPipeline
|
||||
from diffusers.utils import load_image
|
||||
|
||||
pipe = OmniGenPipeline.from_pretrained(
|
||||
"Shitao/OmniGen-v1-diffusers",
|
||||
torch_dtype=torch.bfloat16
|
||||
)
|
||||
pipe.to("cuda")
|
||||
|
||||
prompt="A woman is walking down the street, wearing a white long-sleeve blouse with lace details on the sleeves, paired with a blue pleated skirt. The woman is <img><|image_1|></img>. The long-sleeve blouse and a pleated skirt are <img><|image_2|></img>."
|
||||
input_image_1 = load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/emma.jpeg")
|
||||
input_image_2 = load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/dress.jpg")
|
||||
input_images=[input_image_1, input_image_2]
|
||||
image = pipe(
|
||||
prompt=prompt,
|
||||
input_images=input_images,
|
||||
height=1024,
|
||||
width=1024,
|
||||
guidance_scale=2.5,
|
||||
img_guidance_scale=1.6,
|
||||
generator=torch.Generator(device="cpu").manual_seed(666)
|
||||
).images[0]
|
||||
image.save("output.png")
|
||||
```
|
||||
|
||||
<div class="flex flex-row gap-4">
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/emma.jpeg"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">person image</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/dress.jpg"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">clothe image</figcaption>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<img class="rounded-xl" src="https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/tryon.png"/>
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
## Optimization when using multiple images
|
||||
|
||||
For the text-to-image task, OmniGen requires minimal memory and time (about 9GB of memory and 31s for a 1024x1024 image on an A800 GPU).
|
||||
However, when using input images, the computational cost increases.
|
||||
|
||||
Here are some guidelines to help you reduce computational costs when using multiple images. The experiments are conducted on an A800 GPU with two input images.
|
||||
|
||||
Like other pipelines, you can reduce memory usage by offloading the model: `pipe.enable_model_cpu_offload()` or `pipe.enable_sequential_cpu_offload()`.
|
||||
In OmniGen, you can also decrease computational overhead by reducing the `max_input_image_size` (see the example after the table below).
|
||||
The memory consumption for different image sizes is shown in the table below:
|
||||
|
||||
| Setting | Memory Usage |
|
||||
|---------------------------|--------------|
|
||||
| max_input_image_size=1024 | 40GB |
|
||||
| max_input_image_size=512 | 17GB |
|
||||
| max_input_image_size=256 | 14GB |
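The sketch below combines the two levers discussed above, model CPU offloading and a smaller `max_input_image_size`; the value 512 is simply one of the settings from the table, not a recommendation.

```python
import torch
from diffusers import OmniGenPipeline
from diffusers.utils import load_image

pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1-diffusers", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()  # keep submodules on CPU until they are needed

prompt = "A man and a woman are sitting at a classroom desk. The man is the man with yellow hair in <img><|image_1|></img>. The woman is the woman on the left of <img><|image_2|></img>"
input_images = [
    load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/3.png"),
    load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/4.png"),
]
image = pipe(
    prompt=prompt,
    input_images=input_images,
    max_input_image_size=512,  # downscale input images before encoding to save memory
    guidance_scale=2.5,
    img_guidance_scale=1.6,
    generator=torch.Generator(device="cpu").manual_seed(666),
).images[0]
image.save("output.png")
```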
|
||||
|
||||
@@ -106,7 +106,7 @@ Let's try it out!
|
||||
|
||||
## Deconstruct the Stable Diffusion pipeline
|
||||
|
||||
Stable Diffusion is a text-to-image *latent diffusion* model. It is called a latent diffusion model because it works with a lower-dimensional representation of the image instead of the actual pixel space, which makes it more memory efficient. The encoder compresses the image into a smaller representation, and a decoder converts the compressed representation back into an image. For text-to-image models, you'll need a tokenizer and an encoder to generate text embeddings. From the previous example, you already know you need a UNet model and a scheduler.
|
||||
Stable Diffusion is a text-to-image *latent diffusion* model. It is called a latent diffusion model because it works with a lower-dimensional representation of the image instead of the actual pixel space, which makes it more memory efficient. The encoder compresses the image into a smaller representation, and a decoder to convert the compressed representation back into an image. For text-to-image models, you'll need a tokenizer and an encoder to generate text embeddings. From the previous example, you already know you need a UNet model and a scheduler.
|
||||
|
||||
As you can see, this is already more complex than the DDPM pipeline which only contains a UNet model. The Stable Diffusion model has three separate pretrained models.
|
||||
|
||||
|
||||
@@ -40,9 +40,9 @@ Training examples show how to pretrain or fine-tune diffusion models for a varie
|
||||
| [**Text-to-Image fine-tuning**](./text_to_image) | ✅ | ✅ |
|
||||
| [**Textual Inversion**](./textual_inversion) | ✅ | - | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb)
|
||||
| [**Dreambooth**](./dreambooth) | ✅ | - | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb)
|
||||
| [**ControlNet**](./controlnet) | ✅ | ✅ | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb)
|
||||
| [**InstructPix2Pix**](./instruct_pix2pix) | ✅ | ✅ | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/InstructPix2Pix_using_diffusers.ipynb)
|
||||
| [**Reinforcement Learning for Control**](./reinforcement_learning) | - | - | [Notebook1](https://github.com/huggingface/notebooks/blob/main/diffusers/reinforcement_learning_for_control.ipynb), [Notebook2](https://github.com/huggingface/notebooks/blob/main/diffusers/reinforcement_learning_with_diffusers.ipynb)
|
||||
| [**ControlNet**](./controlnet) | ✅ | ✅ | -
|
||||
| [**InstructPix2Pix**](./instruct_pix2pix) | ✅ | ✅ | -
|
||||
| [**Reinforcement Learning for Control**](./reinforcement_learning) | - | - | coming soon.
|
||||
|
||||
## Community
|
||||
|
||||
|
||||
707
examples/community/README.md
Normal file → Executable file
@@ -24,35 +24,32 @@ Please also check out our [Community Scripts](https://github.com/huggingface/dif
|
||||
| Speech to Image | Using automatic-speech-recognition to transcribe text and Stable Diffusion to generate images | [Speech to Image](#speech-to-image) |[Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/speech_to_image.ipynb) | [Mikail Duzenli](https://github.com/MikailINTech)
|
||||
| Wild Card Stable Diffusion | Stable Diffusion Pipeline that supports prompts that contain wildcard terms (indicated by surrounding double underscores), with values instantiated randomly from a corresponding txt file or a dictionary of possible values | [Wildcard Stable Diffusion](#wildcard-stable-diffusion) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/wildcard_stable_diffusion.ipynb) | [Shyam Sudhakaran](https://github.com/shyamsn97) |
|
||||
| [Composable Stable Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/) | Stable Diffusion Pipeline that supports prompts that contain "|" in prompts (as an AND condition) and weights (separated by "|" as well) to positively / negatively weight prompts. | [Composable Stable Diffusion](#composable-stable-diffusion) | - | [Mark Rich](https://github.com/MarkRich) |
|
||||
| Seed Resizing Stable Diffusion | Stable Diffusion Pipeline that supports resizing an image and retaining the concepts of the 512 by 512 generation. | [Seed Resizing](#seed-resizing) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/seed_resizing.ipynb) | [Mark Rich](https://github.com/MarkRich) |
|
||||
| Imagic Stable Diffusion | Stable Diffusion Pipeline that enables writing a text prompt to edit an existing image | [Imagic Stable Diffusion](#imagic-stable-diffusion) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/imagic_stable_diffusion.ipynb) | [Mark Rich](https://github.com/MarkRich) |
| Seed Resizing Stable Diffusion | Stable Diffusion Pipeline that supports resizing an image and retaining the concepts of the 512 by 512 generation. | [Seed Resizing](#seed-resizing) | - | [Mark Rich](https://github.com/MarkRich) |
| Multilingual Stable Diffusion | Stable Diffusion Pipeline that supports prompts in 50 different languages. | [Multilingual Stable Diffusion](#multilingual-stable-diffusion-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/multilingual_stable_diffusion.ipynb) | [Juan Carlos Piñeros](https://github.com/juancopi81) |
| GlueGen Stable Diffusion | Stable Diffusion Pipeline that supports prompts in different languages using the GlueGen adapter. | [GlueGen Stable Diffusion](#gluegen-stable-diffusion-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/gluegen_stable_diffusion.ipynb) | [Phạm Hồng Vinh](https://github.com/rootonchair) |
| Image to Image Inpainting Stable Diffusion | Stable Diffusion Pipeline that enables the overlaying of two images and subsequent inpainting | [Image to Image Inpainting Stable Diffusion](#image-to-image-inpainting-stable-diffusion) | - | [Alex McKinney](https://github.com/vvvm23) |
| Text Based Inpainting Stable Diffusion | Stable Diffusion Inpainting Pipeline that enables passing a text prompt to generate the mask for inpainting | [Text Based Inpainting Stable Diffusion](#text-based-inpainting-stable-diffusion) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/text_based_inpainting_stable_dffusion.ipynb) | [Dhruv Karan](https://github.com/unography) |
| Bit Diffusion | Diffusion on discrete data | [Bit Diffusion](#bit-diffusion) | - | [Stuti R.](https://github.com/kingstut) |
| K-Diffusion Stable Diffusion | Run Stable Diffusion with any of [K-Diffusion's samplers](https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/sampling.py) | [Stable Diffusion with K Diffusion](#stable-diffusion-with-k-diffusion) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
| Checkpoint Merger Pipeline | Diffusion Pipeline that enables merging of saved model checkpoints | [Checkpoint Merger Pipeline](#checkpoint-merger-pipeline) | - | [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
| Stable Diffusion v1.1-1.4 Comparison | Run all 4 model checkpoints for Stable Diffusion and compare their results together | [Stable Diffusion Comparison](#stable-diffusion-comparisons) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/stable_diffusion_comparison.ipynb) | [Suvaditya Mukherjee](https://github.com/suvadityamuk) |
| MagicMix | Diffusion Pipeline for semantic mixing of an image and a text prompt | [MagicMix](#magic-mix) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/magic_mix.ipynb) | [Partho Das](https://github.com/daspartho) |
| Stable UnCLIP | Diffusion Pipeline for combining a prior model (generates a CLIP image embedding from text, UnCLIPPipeline `"kakaobrain/karlo-v1-alpha"`) and a decoder pipeline (decodes the CLIP image embedding to an image, StableDiffusionImageVariationPipeline `"lambdalabs/sd-image-variations-diffusers"`). | [Stable UnCLIP](#stable-unclip) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/stable_unclip.ipynb) | [Ray Wang](https://wrong.wang) |
| UnCLIP Text Interpolation Pipeline | Diffusion Pipeline that allows passing two prompts and produces images while interpolating between the text-embeddings of the two prompts | [UnCLIP Text Interpolation Pipeline](#unclip-text-interpolation-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/unclip_text_interpolation.ipynb) | [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
| UnCLIP Image Interpolation Pipeline | Diffusion Pipeline that allows passing two images/image_embeddings and produces images while interpolating between their image-embeddings | [UnCLIP Image Interpolation Pipeline](#unclip-image-interpolation-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/unclip_image_interpolation.ipynb) | [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
| DDIM Noise Comparative Analysis Pipeline | Investigating how the diffusion models learn visual concepts from each noise level (which is a contribution of [P2 weighting (CVPR 2022)](https://arxiv.org/abs/2204.00227)) | [DDIM Noise Comparative Analysis Pipeline](#ddim-noise-comparative-analysis-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/ddim_noise_comparative_analysis.ipynb) | [Aengus (Duc-Anh)](https://github.com/aengusng8) |
| CLIP Guided Img2Img Stable Diffusion Pipeline | Doing CLIP guidance for image to image generation with Stable Diffusion | [CLIP Guided Img2Img Stable Diffusion](#clip-guided-img2img-stable-diffusion) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/clip_guided_img2img_stable_diffusion.ipynb) | [Nipun Jindal](https://github.com/nipunjindal/) |
| TensorRT Stable Diffusion Text to Image Pipeline | Accelerates the Stable Diffusion Text2Image Pipeline using TensorRT | [TensorRT Stable Diffusion Text to Image Pipeline](#tensorrt-text2image-stable-diffusion-pipeline) | - | [Asfiya Baig](https://github.com/asfiyab-nvidia) |
| EDICT Image Editing Pipeline | Diffusion pipeline for text-guided image editing | [EDICT Image Editing Pipeline](#edict-image-editing-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/edict_image_pipeline.ipynb) | [Joqsan Azocar](https://github.com/Joqsan) |
| Stable Diffusion RePaint | Stable Diffusion pipeline using [RePaint](https://arxiv.org/abs/2201.09865) for inpainting. | [Stable Diffusion RePaint](#stable-diffusion-repaint) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/stable_diffusion_repaint.ipynb) | [Markus Pobitzer](https://github.com/Markus-Pobitzer) |
| TensorRT Stable Diffusion Image to Image Pipeline | Accelerates the Stable Diffusion Image2Image Pipeline using TensorRT | [TensorRT Stable Diffusion Image to Image Pipeline](#tensorrt-image2image-stable-diffusion-pipeline) | - | [Asfiya Baig](https://github.com/asfiyab-nvidia) |
| Stable Diffusion IPEX Pipeline | Accelerate Stable Diffusion inference pipeline with BF16/FP32 precision on Intel Xeon CPUs with [IPEX](https://github.com/intel/intel-extension-for-pytorch) | [Stable Diffusion on IPEX](#stable-diffusion-on-ipex) | - | [Yingjie Han](https://github.com/yingjie-han/) |
| CLIP Guided Images Mixing Stable Diffusion Pipeline | Combine two images using ordinary diffusion models. | [CLIP Guided Images Mixing Using Stable Diffusion](#clip-guided-images-mixing-with-stable-diffusion) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/clip_guided_images_mixing_with_stable_diffusion.ipynb) | [Karachev Denis](https://github.com/TheDenk) |
| TensorRT Stable Diffusion Inpainting Pipeline | Accelerates the Stable Diffusion Inpainting Pipeline using TensorRT | [TensorRT Stable Diffusion Inpainting Pipeline](#tensorrt-inpainting-stable-diffusion-pipeline) | - | [Asfiya Baig](https://github.com/asfiyab-nvidia) |
| IADB Pipeline | Implementation of [Iterative α-(de)Blending: a Minimalist Deterministic Diffusion Model](https://arxiv.org/abs/2305.03486) | [IADB Pipeline](#iadb-pipeline) | - | [Thomas Chambon](https://github.com/tchambon) |
| Zero1to3 Pipeline | Implementation of [Zero-1-to-3: Zero-shot One Image to 3D Object](https://arxiv.org/abs/2303.11328) | [Zero1to3 Pipeline](#zero1to3-pipeline) | - | [Xin Kong](https://github.com/kxhit) |
| Stable Diffusion XL Long Weighted Prompt Pipeline | A pipeline that supports prompts and negative prompts of unlimited length, using the A1111 style of prompt weighting | [Stable Diffusion XL Long Weighted Prompt Pipeline](#stable-diffusion-xl-long-weighted-prompt-pipeline) | [](https://colab.research.google.com/drive/1LsqilswLR40XLLcp6XFOl5nKb_wOe26W?usp=sharing) | [Andrew Zhu](https://xhinker.medium.com/) |
| Stable Diffusion Mixture Tiling Pipeline SD 1.5 | A pipeline that generates cohesive images by integrating multiple diffusion processes, each focused on a specific image region and considering boundary effects for smooth blending | [Stable Diffusion Mixture Tiling Pipeline SD 1.5](#stable-diffusion-mixture-tiling-pipeline-sd-15) | [](https://huggingface.co/spaces/albarji/mixture-of-diffusers) | [Álvaro B Jiménez](https://github.com/albarji/) |
| Stable Diffusion Mixture Canvas Pipeline SD 1.5 | A pipeline that generates cohesive images by integrating multiple diffusion processes, each focused on a specific image region and considering boundary effects for smooth blending. Works by defining a list of Text2Image region objects that detail the region of influence of each diffuser. | [Stable Diffusion Mixture Canvas Pipeline SD 1.5](#stable-diffusion-mixture-canvas-pipeline-sd-15) | [](https://huggingface.co/spaces/albarji/mixture-of-diffusers) | [Álvaro B Jiménez](https://github.com/albarji/) |
| Stable Diffusion Mixture Tiling Pipeline SDXL | A pipeline that generates cohesive images by integrating multiple diffusion processes, each focused on a specific image region and considering boundary effects for smooth blending | [Stable Diffusion Mixture Tiling Pipeline SDXL](#stable-diffusion-mixture-tiling-pipeline-sdxl) | [](https://huggingface.co/spaces/elismasilva/mixture-of-diffusers-sdxl-tiling) | [Eliseu Silva](https://github.com/DEVAIEXP/) |
| FABRIC - Stable Diffusion with feedback Pipeline | Pipeline that supports feedback from liked and disliked images | [Stable Diffusion Fabric Pipeline](#stable-diffusion-fabric-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/stable_diffusion_fabric.ipynb) | [Shauray Singh](https://shauray8.github.io/about_shauray/) |
| sketch inpaint - Inpainting with non-inpaint Stable Diffusion | Sketch inpainting, much like in AUTOMATIC1111 | [Masked Im2Im Stable Diffusion Pipeline](#stable-diffusion-masked-im2im) | - | [Anatoly Belikov](https://github.com/noskill) |
| sketch inpaint xl - Inpainting with non-inpaint Stable Diffusion XL | Sketch inpainting, much like in AUTOMATIC1111 | [Masked Im2Im Stable Diffusion XL Pipeline](#stable-diffusion-xl-masked-im2im) | - | [Anatoly Belikov](https://github.com/noskill) |
@@ -60,7 +57,7 @@ Please also check out our [Community Scripts](https://github.com/huggingface/dif
| Latent Consistency Pipeline | Implementation of [Latent Consistency Models: Synthesizing High-Resolution Images with Few-Step Inference](https://arxiv.org/abs/2310.04378) | [Latent Consistency Pipeline](#latent-consistency-pipeline) | - | [Simian Luo](https://github.com/luosiallen) |
| Latent Consistency Img2img Pipeline | Img2img pipeline for Latent Consistency Models | [Latent Consistency Img2Img Pipeline](#latent-consistency-img2img-pipeline) | - | [Logan Zoellner](https://github.com/nagolinc) |
| Latent Consistency Interpolation Pipeline | Interpolate the latent space of Latent Consistency Models with multiple prompts | [Latent Consistency Interpolation Pipeline](#latent-consistency-interpolation-pipeline) | [](https://colab.research.google.com/drive/1pK3NrLWJSiJsBynLns1K1-IDTW9zbPvl?usp=sharing) | [Aryan V S](https://github.com/a-r-r-o-w) |
| SDE Drag Pipeline | The pipeline supports drag editing of images using stochastic differential equations | [SDE Drag Pipeline](#sde-drag-pipeline) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/sde_drag.ipynb) | [NieShen](https://github.com/NieShenRuc) [Fengqi Zhu](https://github.com/Monohydroxides) |
| Regional Prompting Pipeline | Assign multiple prompts for different regions | [Regional Prompting Pipeline](#regional-prompting-pipeline) | - | [hako-mikan](https://github.com/hako-mikan) |
| LDM3D-sr (LDM3D upscaler) | Upscale low resolution RGB and depth inputs to high resolution | [StableDiffusionUpscaleLDM3D Pipeline](https://github.com/estelleafl/diffusers/tree/ldm3d_upscaler_community/examples/community#stablediffusionupscaleldm3d-pipeline) | - | [Estelle Aflalo](https://github.com/estelleafl) |
| AnimateDiff ControlNet Pipeline | Combines AnimateDiff with precise motion control using ControlNets | [AnimateDiff ControlNet Pipeline](#animatediff-controlnet-pipeline) | [](https://colab.research.google.com/drive/1SKboYeGjEQmQPWoFC0aLYpBlYdHXkvAu?usp=sharing) | [Aryan V S](https://github.com/a-r-r-o-w) and [Edoardo Botta](https://github.com/EdoardoBotta) |
@@ -80,8 +77,6 @@ Please also check out our [Community Scripts](https://github.com/huggingface/dif
| PIXART-α Controlnet pipeline | Implementation of the controlnet model for pixart alpha and its diffusers pipeline | [PIXART-α Controlnet pipeline](#pixart-α-controlnet-pipeline) | - | [Raul Ciotescu](https://github.com/raulc0399/) |
| HunyuanDiT Differential Diffusion Pipeline | Applies [Differential Diffusion](https://github.com/exx8/differential-diffusion) to [HunyuanDiT](https://github.com/huggingface/diffusers/pull/8240). | [HunyuanDiT with Differential Diffusion](#hunyuandit-with-differential-diffusion) | [](https://colab.research.google.com/drive/1v44a5fpzyr4Ffr4v2XBQ7BajzG874N4P?usp=sharing) | [Monjoy Choudhury](https://github.com/MnCSSJ4x) |
| [🪆Matryoshka Diffusion Models](https://huggingface.co/papers/2310.15111) | A diffusion process that denoises inputs at multiple resolutions jointly and uses a NestedUNet architecture where features and parameters for small scale inputs are nested within those of the large scales. See [original codebase](https://github.com/apple/ml-mdm). | [🪆Matryoshka Diffusion Models](#matryoshka-diffusion-models) | [](https://huggingface.co/spaces/pcuenq/mdm) [](https://colab.research.google.com/gist/tolgacangoz/1f54875fc7aeaabcf284ebde64820966/matryoshka_hf.ipynb) | [M. Tolga Cangöz](https://github.com/tolgacangoz) |
| Stable Diffusion XL Attentive Eraser Pipeline | [[AAAI2025 Oral] Attentive Eraser](https://github.com/Anonym0u3/AttentiveEraser) is a novel tuning-free method that enhances object removal capabilities in pre-trained diffusion models. | [Stable Diffusion XL Attentive Eraser Pipeline](#stable-diffusion-xl-attentive-eraser-pipeline) | - | [Wenhao Sun](https://github.com/Anonym0u3) and [Benlei Cui](https://github.com/Benny079) |
| Perturbed-Attention Guidance | StableDiffusionPAGPipeline is a modification of StableDiffusionPipeline to support Perturbed-Attention Guidance (PAG). | [Perturbed-Attention Guidance](#perturbed-attention-guidance) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/perturbed_attention_guidance.ipynb) | [Hyoungwon Cho](https://github.com/HyoungwonCho) |
To load a custom pipeline, pass the `custom_pipeline` argument to `DiffusionPipeline`, set to the name of one of the files in `diffusers/examples/community`. Feel free to send a PR with your own pipelines; we will merge them quickly.
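For example, a minimal snippet that loads the CLIP-guided Stable Diffusion community pipeline looks like this (the checkpoint and the guidance models are interchangeable with any compatible ones):

```python
import torch
from diffusers import DiffusionPipeline
from transformers import CLIPImageProcessor, CLIPModel

# Extra components required by the community pipeline are passed as keyword arguments
feature_extractor = CLIPImageProcessor.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K")
clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16)

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_stable_diffusion",  # filename (without .py) from examples/community
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
).to("cuda")

image = pipe("a fantasy landscape, concept art", clip_guidance_scale=100).images[0]
image.save("custom_pipeline_example.png")
```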
@@ -952,15 +947,10 @@ image.save('./imagic/imagic_image_alpha_2.png')
|
||||
Test seed resizing: first generate an image at 512 by 512, then generate an image with the same seed at 512 by 592 using seed resizing, and finally generate a 512 by 592 image with the original Stable Diffusion pipeline.

```python
import os

import torch as th
import numpy as np
from diffusers import DiffusionPipeline

# Ensure the save directory exists or create it
save_dir = './seed_resize/'
os.makedirs(save_dir, exist_ok=True)

has_cuda = th.cuda.is_available()
device = th.device('cpu' if not has_cuda else 'cuda')

@@ -974,6 +964,7 @@ def dummy(images, **kwargs):

pipe.safety_checker = dummy

images = []
th.manual_seed(0)
generator = th.Generator("cuda").manual_seed(0)

@@ -992,14 +983,15 @@ res = pipe(
    width=width,
    generator=generator)
image = res.images[0]
image.save(os.path.join(save_dir, 'seed_resize_{w}_{h}_image.png'.format(w=width, h=height)))


th.manual_seed(0)
generator = th.Generator("cuda").manual_seed(0)

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="seed_resize_stable_diffusion"
).to(device)

width = 512

@@ -1013,11 +1005,11 @@ res = pipe(
    width=width,
    generator=generator)
image = res.images[0]
image.save(os.path.join(save_dir, 'seed_resize_{w}_{h}_image.png'.format(w=width, h=height)))

pipe_compare = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="seed_resize_stable_diffusion"
).to(device)

res = pipe_compare(
@@ -1030,7 +1022,7 @@ res = pipe_compare(
)

image = res.images[0]
image.save(os.path.join(save_dir, 'seed_resize_{w}_{h}_image_compare.png'.format(w=width, h=height)))
```
|
||||
|
||||
### Multilingual Stable Diffusion Pipeline
|
||||
@@ -1107,100 +1099,38 @@ GlueGen is a minimal adapter that allows alignment between any encoder (Text Enc
|
||||
Make sure you have downloaded `gluenet_French_clip_overnorm_over3_noln.ckpt` for French (there are also pre-trained weights for Chinese, Italian, Japanese, and Spanish, or you can train your own) from [GlueGen's official repo](https://github.com/salesforce/GlueGen/tree/main).

```python
import os
import urllib.request

import torch
from transformers import AutoModel, AutoTokenizer
from diffusers import DiffusionPipeline

# GlueNet adapter checkpoints hosted by the GlueGen authors
CHECKPOINTS = [
    "https://storage.googleapis.com/sfr-gluegen-data-research/checkpoints_all/gluenet_checkpoint/gluenet_Chinese_clip_overnorm_over3_noln.ckpt",
    "https://storage.googleapis.com/sfr-gluegen-data-research/checkpoints_all/gluenet_checkpoint/gluenet_French_clip_overnorm_over3_noln.ckpt",
    "https://storage.googleapis.com/sfr-gluegen-data-research/checkpoints_all/gluenet_checkpoint/gluenet_Italian_clip_overnorm_over3_noln.ckpt",
    "https://storage.googleapis.com/sfr-gluegen-data-research/checkpoints_all/gluenet_checkpoint/gluenet_Japanese_clip_overnorm_over3_noln.ckpt",
    "https://storage.googleapis.com/sfr-gluegen-data-research/checkpoints_all/gluenet_checkpoint/gluenet_Spanish_clip_overnorm_over3_noln.ckpt",
    "https://storage.googleapis.com/sfr-gluegen-data-research/checkpoints_all/gluenet_checkpoint/gluenet_sound2img_audioclip_us8k.ckpt",
]


def download_checkpoints(checkpoint_dir):
    os.makedirs(checkpoint_dir, exist_ok=True)
    for url in CHECKPOINTS:
        filename = os.path.join(checkpoint_dir, os.path.basename(url))
        if not os.path.exists(filename):
            print(f"Downloading {filename}...")
            urllib.request.urlretrieve(url, filename)
            print(f"Downloaded {filename}")
        else:
            print(f"Checkpoint {filename} already exists, skipping download.")
    return checkpoint_dir


if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"

    checkpoint_dir = download_checkpoints("./checkpoints_all/gluenet_checkpoint")

    lm_model_id = "xlm-roberta-large"
    token_max_length = 77

    text_encoder = AutoModel.from_pretrained(lm_model_id)
    tokenizer = AutoTokenizer.from_pretrained(lm_model_id, model_max_length=token_max_length, use_fast=False)

    tensor_norm = torch.Tensor([[43.8203],[28.3668],[27.9345],[28.0084],[28.2958],[28.2576],[28.3373],[28.2695],[28.4097],[28.2790],[28.2825],[28.2807],[28.2775],[28.2708],[28.2682],[28.2624],[28.2589],[28.2611],[28.2616],[28.2639],[28.2613],[28.2566],[28.2615],[28.2665],[28.2799],[28.2885],[28.2852],[28.2863],[28.2780],[28.2818],[28.2764],[28.2532],[28.2412],[28.2336],[28.2514],[28.2734],[28.2763],[28.2977],[28.2971],[28.2948],[28.2818],[28.2676],[28.2831],[28.2890],[28.2979],[28.2999],[28.3117],[28.3363],[28.3554],[28.3626],[28.3589],[28.3597],[28.3543],[28.3660],[28.3731],[28.3717],[28.3812],[28.3753],[28.3810],[28.3777],[28.3693],[28.3713],[28.3670],[28.3691],[28.3679],[28.3624],[28.3703],[28.3703],[28.3720],[28.3594],[28.3576],[28.3562],[28.3438],[28.3376],[28.3389],[28.3433],[28.3191]])

    pipeline = DiffusionPipeline.from_pretrained(
        "stable-diffusion-v1-5/stable-diffusion-v1-5",
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        custom_pipeline="gluegen"
    ).to(device)
    pipeline.load_language_adapter(
        os.path.join(checkpoint_dir, "gluenet_French_clip_overnorm_over3_noln.ckpt"),
        num_token=token_max_length,
        dim=1024,
        dim_out=768,
        tensor_norm=tensor_norm,
    )

    prompt = "une voiture sur la plage"

    generator = torch.Generator(device=device).manual_seed(42)
    image = pipeline(prompt, generator=generator).images[0]
    image.save("gluegen_output_fr.png")
```
|
||||
|
||||
Which will produce:
|
||||
@@ -1251,49 +1181,28 @@ Currently uses the CLIPSeg model for mask generation, then calls the standard St
|
||||
```python
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
from diffusers import DiffusionPipeline

from PIL import Image
import requests
import torch

# Load CLIPSeg model and processor (used by the pipeline to generate the mask)
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

# Load Stable Diffusion Inpainting Pipeline with the custom pipeline
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=model,
    segmentation_processor=processor
)
pipe = pipe.to("cuda")

# Load input image
url = "https://github.com/timojl/clipseg/blob/master/example_image.jpg?raw=true"
image = Image.open(requests.get(url, stream=True).raw).resize((512, 512))
text = "a glass"  # will mask out this text
prompt = "a cup"  # the masked-out region will be replaced with this

image = pipe(image=image, text=text, prompt=prompt).images[0]
image.save("inpainting_output.png")
```
|
||||
|
||||
### Bit Diffusion
|
||||
@@ -1469,10 +1378,8 @@ There are 3 parameters for the method-
|
||||
Here is an example usage:

```python
import requests
from diffusers import DiffusionPipeline, DDIMScheduler
from PIL import Image
from io import BytesIO

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
@@ -1480,11 +1387,9 @@ pipe = DiffusionPipeline.from_pretrained(
    scheduler=DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler"),
).to('cuda')

url = "https://user-images.githubusercontent.com/59410571/209578593-141467c7-d831-4792-8b9a-b17dc5e47816.jpg"
response = requests.get(url)
img = Image.open(BytesIO(response.content)).convert("RGB")  # convert to RGB to avoid issues

mix_img = pipe(
    img,
    prompt='bed',
    kmin=0.3,
    kmax=0.5,
|
||||
@@ -1637,8 +1542,6 @@ This Diffusion Pipeline takes two images or an image_embeddings tensor of size 2
|
||||
import torch
from diffusers import DiffusionPipeline
from PIL import Image
import requests
from io import BytesIO

device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
dtype = torch.float16 if torch.cuda.is_available() else torch.bfloat16

@@ -1650,25 +1553,13 @@ pipe = DiffusionPipeline.from_pretrained(
)
pipe.to(device)

# List of image URLs
image_urls = [
    'https://camo.githubusercontent.com/ef13c8059b12947c0d5e8d3ea88900de6bf1cd76bbf61ace3928e824c491290e/68747470733a2f2f68756767696e67666163652e636f2f64617461736574732f4e616761536169416268696e61792f556e434c4950496d616765496e746572706f6c6174696f6e53616d706c65732f7265736f6c76652f6d61696e2f7374617272795f6e696768742e6a7067',
    'https://camo.githubusercontent.com/d1947ab7c49ae3f550c28409d5e8b120df48e456559cf4557306c0848337702c/68747470733a2f2f68756767696e67666163652e636f2f64617461736574732f4e616761536169416268696e61792f556e434c4950496d616765496e746572706f6c6174696f6e53616d706c65732f7265736f6c76652f6d61696e2f666c6f776572732e6a7067'
]

# Open images from URLs
images = []
for url in image_urls:
    response = requests.get(url)
    img = Image.open(BytesIO(response.content))
    images.append(img)

# For best results keep the prompts close in length to each other. Of course, feel free to try out with differing lengths.
generator = torch.Generator(device=device).manual_seed(42)

output = pipe(image=images, steps=6, generator=generator)

for i, image in enumerate(output.images):
    image.save('starry_to_flowers_%s.jpg' % i)
```
|
||||
|
||||
@@ -1745,51 +1636,37 @@ from diffusers import DiffusionPipeline
|
||||
from PIL import Image
from transformers import CLIPImageProcessor, CLIPModel

# Load CLIP model and feature extractor
feature_extractor = CLIPImageProcessor.from_pretrained(
    "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
)
clip_model = CLIPModel.from_pretrained(
    "laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
)

# Load guided pipeline
guided_pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_stable_diffusion_img2img",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
)
guided_pipeline.enable_attention_slicing()
guided_pipeline = guided_pipeline.to("cuda")

# Define prompt and fetch image
prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece"
url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
response = requests.get(url)
edit_image = Image.open(BytesIO(response.content)).convert("RGB")

# Run the pipeline
image = guided_pipeline(
    prompt=prompt,
    height=512,               # Height of the output image
    width=512,                # Width of the output image
    image=edit_image,         # Input image to guide the diffusion
    strength=0.75,            # How much to transform the input image
    num_inference_steps=30,   # Number of diffusion steps
    guidance_scale=7.5,       # Scale of the classifier-free guidance
    clip_guidance_scale=100,  # Scale of the CLIP guidance
    num_images_per_prompt=1,  # Generate one image per prompt
    eta=0.0,                  # Noise scheduling parameter
    num_cutouts=4,            # Number of cutouts for CLIP guidance
    use_cutouts=False,        # Whether to use cutouts
    output_type="pil",        # Output as PIL image
).images[0]

# Display the generated image
image.show()
```
|
||||
|
||||
Init Image
|
||||
@@ -2366,85 +2243,6 @@ CLIP guided stable diffusion images mixing pipeline allows to combine two images
|
||||
This approach uses an (optional) CoCa model to avoid writing the image description.
|
||||
[More code examples](https://github.com/TheDenk/images_mixing)
|
||||
|
||||
### Example Images Mixing (with CoCa)
|
||||
|
||||
```python
import PIL
import torch
import requests
import open_clip
from open_clip import SimpleTokenizer
from io import BytesIO
from diffusers import DiffusionPipeline
from transformers import CLIPImageProcessor, CLIPModel


def download_image(url):
    response = requests.get(url)
    return PIL.Image.open(BytesIO(response.content)).convert("RGB")


# Loading additional models
feature_extractor = CLIPImageProcessor.from_pretrained(
    "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
)
clip_model = CLIPModel.from_pretrained(
    "laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
)
coca_model = open_clip.create_model('coca_ViT-L-14', pretrained='laion2B-s13B-b90k').to('cuda')
coca_model.dtype = torch.float16
coca_transform = open_clip.image_transform(
    coca_model.visual.image_size,
    is_train=False,
    mean=getattr(coca_model.visual, 'image_mean', None),
    std=getattr(coca_model.visual, 'image_std', None),
)
coca_tokenizer = SimpleTokenizer()

# Pipeline creating
mixing_pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_images_mixing_stable_diffusion",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    coca_model=coca_model,
    coca_tokenizer=coca_tokenizer,
    coca_transform=coca_transform,
    torch_dtype=torch.float16,
)
mixing_pipeline.enable_attention_slicing()
mixing_pipeline = mixing_pipeline.to("cuda")

# Pipeline running
generator = torch.Generator(device="cuda").manual_seed(17)

content_image = download_image("https://huggingface.co/datasets/TheDenk/images_mixing/resolve/main/boromir.jpg")
style_image = download_image("https://huggingface.co/datasets/TheDenk/images_mixing/resolve/main/gigachad.jpg")

pipe_images = mixing_pipeline(
    num_inference_steps=50,
    content_image=content_image,
    style_image=style_image,
    noise_strength=0.65,
    slerp_latent_style_strength=0.9,
    slerp_prompt_style_strength=0.1,
    slerp_clip_image_style_strength=0.1,
    guidance_scale=9.0,
    batch_size=1,
    clip_guidance_scale=100,
    generator=generator,
).images

output_path = "mixed_output.jpg"
pipe_images[0].save(output_path)
print(f"Image saved successfully at {output_path}")
```
|
||||
|
||||

|
||||
|
||||
### Stable Diffusion XL Long Weighted Prompt Pipeline
|
||||
|
||||
This SDXL pipeline supports prompts and negative prompts of unlimited length and is compatible with the A1111 prompt-weighting style.
|
||||
@@ -2510,7 +2308,83 @@ In the above code, the `prompt2` is appended to the `prompt`, which is more than
|
||||
|
||||
For more results, check out [PR #6114](https://github.com/huggingface/diffusers/pull/6114).
|
||||
|
||||
### Stable Diffusion Mixture Tiling Pipeline SD 1.5
|
||||
This pipeline uses the Mixture of Diffusers approach; refer to the [Mixture](https://arxiv.org/abs/2302.02412) paper for more details.
|
||||
|
||||
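A minimal sketch of how the tiling call typically looks is below; it assumes the `mixture_tiling` community pipeline, and the tile sizes, prompts, and seed are illustrative (compare with the SDXL variant further down):

```python
from diffusers import DiffusionPipeline, LMSDiscreteScheduler

# Create scheduler and model (similar to StableDiffusionPipeline)
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", scheduler=scheduler, custom_pipeline="mixture_tiling"
).to("cuda")

# One prompt per tile; this grid is a single row of three tiles blended along their column overlaps
image = pipeline(
    prompt=[[
        "A charming house in the countryside, by jakub rozalski, sunset lighting",
        "A dirt road in the countryside crossing pastures, by jakub rozalski, sunset lighting",
        "An old and rusty giant robot lying on a dirt road, by jakub rozalski, sunset lighting",
    ]],
    tile_height=640,
    tile_width=640,
    tile_row_overlap=0,
    tile_col_overlap=256,
    guidance_scale=8,
    seed=7178915308,
    num_inference_steps=50,
)["images"][0]
image.save("mixture_tiling_sketch.png")
```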
@@ -2541,95 +2415,6 @@ image = pipeline(
|
||||
|
||||

|
||||
|
||||
### Stable Diffusion Mixture Canvas Pipeline SD 1.5
|
||||
|
||||
This pipeline uses the Mixture of Diffusers approach; refer to the [Mixture](https://arxiv.org/abs/2302.02412) paper for more details.

```python
from PIL import Image
from diffusers import LMSDiscreteScheduler, DiffusionPipeline
from diffusers.pipelines.pipeline_utils import Image2ImageRegion, Text2ImageRegion, preprocess_image


# Load and preprocess guide image
iic_image = preprocess_image(Image.open("input_image.png").convert("RGB"))

# Create scheduler and model (similar to StableDiffusionPipeline)
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", scheduler=scheduler, custom_pipeline="mixture_canvas"
)
pipeline.to("cuda")

# Mixture of Diffusers generation
output = pipeline(
    canvas_height=800,
    canvas_width=352,
    regions=[
        Text2ImageRegion(0, 800, 0, 352, guidance_scale=8,
            prompt=f"best quality, masterpiece, WLOP, sakimichan, art contest winner on pixiv, 8K, intricate details, wet effects, rain drops, ethereal, mysterious, futuristic, UHD, HDR, cinematic lighting, in a beautiful forest, rainy day, award winning, trending on artstation, beautiful confident cheerful young woman, wearing a futuristic sleeveless dress, ultra beautiful detailed eyes, hyper-detailed face, complex, perfect, model, textured, chiaroscuro, professional make-up, realistic, figure in frame, "),
        Image2ImageRegion(800 - 352, 800, 0, 352, reference_image=iic_image, strength=1.0),
    ],
    num_inference_steps=100,
    seed=5525475061,
)["images"][0]
```
|
||||
|
||||

|
||||

|
||||
|
||||
### Stable Diffusion Mixture Tiling Pipeline SDXL
|
||||
|
||||
This pipeline uses the Mixture of Diffusers approach; refer to the [Mixture](https://arxiv.org/abs/2302.02412) paper for more details.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler, AutoencoderKL
|
||||
|
||||
device="cuda"
|
||||
|
||||
# Load fixed vae (optional)
|
||||
vae = AutoencoderKL.from_pretrained(
|
||||
"madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
|
||||
).to(device)
|
||||
|
||||
# Create scheduler and model (similar to StableDiffusionPipeline)
|
||||
model_id="stablediffusionapi/yamermix-v8-vae"
|
||||
scheduler = DPMSolverMultistepScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
|
||||
pipe = DiffusionPipeline.from_pretrained(
|
||||
model_id,
|
||||
torch_dtype=torch.float16,
|
||||
vae=vae,
|
||||
custom_pipeline="mixture_tiling_sdxl",
|
||||
scheduler=scheduler,
|
||||
use_safetensors=False
|
||||
).to(device)
|
||||
|
||||
pipe.enable_model_cpu_offload()
|
||||
pipe.enable_vae_tiling()
|
||||
pipe.enable_vae_slicing()
|
||||
|
||||
generator = torch.Generator(device).manual_seed(297984183)
|
||||
|
||||
# Mixture of Diffusers generation
|
||||
image = pipe(
|
||||
prompt=[[
|
||||
"A charming house in the countryside, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
|
||||
"A dirt road in the countryside crossing pastures, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
|
||||
"An old and rusty giant robot lying on a dirt road, by jakub rozalski, dark sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece"
|
||||
]],
|
||||
tile_height=1024,
|
||||
tile_width=1280,
|
||||
tile_row_overlap=0,
|
||||
tile_col_overlap=256,
|
||||
guidance_scale_tiles=[[7, 7, 7]], # or guidance_scale=7 if is the same for all prompts
|
||||
height=1024,
|
||||
width=3840,
|
||||
generator=generator,
|
||||
num_inference_steps=30,
|
||||
)["images"][0]
|
||||
```
|
||||
|
||||

|
||||
|
||||
### TensorRT Inpainting Stable Diffusion Pipeline
|
||||
|
||||
The TensorRT pipeline can be used to accelerate Stable Diffusion inpainting inference.
|
||||
@@ -2672,6 +2457,41 @@ image = pipe(prompt, image=input_image, mask_image=mask_image, strength=0.75,).i
|
||||
image.save('tensorrt_inpaint_mecha_robot.png')
|
||||
```
|
||||
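A fuller sketch of the flow leading up to that call is below; the checkpoint id, the `stable_diffusion_tensorrt_inpaint` custom pipeline name, and the `set_cached_folder` call are assumptions and may need adjusting:

```python
import requests
from io import BytesIO

import torch
from PIL import Image
from diffusers import DiffusionPipeline, PNDMScheduler

# Assumed checkpoint and custom pipeline name
scheduler = PNDMScheduler.from_pretrained("stabilityai/stable-diffusion-2-inpainting", subfolder="scheduler")
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting",
    custom_pipeline="stable_diffusion_tensorrt_inpaint",
    variant="fp16",
    torch_dtype=torch.float16,
    scheduler=scheduler,
)

# Re-use a cached folder for the exported ONNX models and TensorRT engines (assumed helper)
pipe.set_cached_folder("stabilityai/stable-diffusion-2-inpainting", variant="fp16")
pipe = pipe.to("cuda")


def download(url):
    return Image.open(BytesIO(requests.get(url).content)).convert("RGB")


input_image = download("https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png")
mask_image = download("https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png")

prompt = "a mecha robot sitting on a bench"
image = pipe(prompt, image=input_image, mask_image=mask_image, strength=0.75).images[0]
image.save("tensorrt_inpaint_mecha_robot.png")
```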
|
||||
### IADB pipeline
|
||||
|
||||
This pipeline is an implementation of the [Iterative α-(de)Blending: a Minimalist Deterministic Diffusion Model](https://arxiv.org/abs/2305.03486) paper.
|
||||
@@ -4088,89 +3908,33 @@ This pipeline provides drag-and-drop image editing using stochastic differential
|
||||
See [paper](https://arxiv.org/abs/2311.01410), [paper page](https://ml-gsai.github.io/SDE-Drag-demo/), [original repo](https://github.com/ML-GSAI/SDE-Drag) for more information.
|
||||
|
||||
```py
import PIL
import torch
from diffusers import DDIMScheduler, DiffusionPipeline

# Load the pipeline
model_path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
scheduler = DDIMScheduler.from_pretrained(model_path, subfolder="scheduler")
pipe = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler, custom_pipeline="sde_drag")
pipe.to('cuda')

# To save GPU memory, torch.float16 can be used, but it may compromise image quality.
# If not training LoRA, please avoid using torch.float16
# pipe.to(torch.float16)

# Provide prompt, image, mask image, and the starting and target points for drag editing.
prompt = "prompt of the image"
image = PIL.Image.open('/path/to/image')
mask_image = PIL.Image.open('/path/to/mask_image')
source_points = [[123, 456]]
target_points = [[234, 567]]

# train_lora is optional, and in most cases, using train_lora can better preserve consistency with the original image.
pipe.train_lora(prompt, image)

output = pipe(prompt, image, mask_image, source_points, target_points)
output_image = PIL.Image.fromarray(output)
output_image.save("./output.png")
print("Output image saved as './output.png'")
```
|
||||
|
||||
### Instaflow Pipeline
|
||||
@@ -4821,8 +4585,8 @@ image = pipe(
|
||||
```
|
||||
|
||||
|  |  |  |
|
||||
| -------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ |
|
||||
| Gradient | Input | Output |
|
||||
| ------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- |
|
||||
| Gradient | Input | Output |
|
||||
|
||||
A colab notebook demonstrating all results can be found [here](https://colab.research.google.com/drive/1v44a5fpzyr4Ffr4v2XBQ7BajzG874N4P?usp=sharing). Depth Maps have also been added in the same colab.
|
||||
|
||||
@@ -4870,93 +4634,6 @@ make_image_grid(image, rows=1, cols=len(image))
|
||||
# 50+, 100+, and 250+ num_inference_steps are recommended for nesting levels 0, 1, and 2 respectively.
|
||||
```
|
||||
|
||||
### Stable Diffusion XL Attentive Eraser Pipeline
|
||||
<img src="https://raw.githubusercontent.com/Anonym0u3/Images/refs/heads/main/fenmian.png" width="600" />
|
||||
|
||||
**Stable Diffusion XL Attentive Eraser Pipeline** is an advanced object removal pipeline that leverages SDXL for precise content suppression and seamless region completion. This pipeline uses **self-attention redirection guidance** to modify the model’s self-attention mechanism, allowing for effective removal and inpainting across various levels of mask precision, including semantic segmentation masks, bounding boxes, and hand-drawn masks. If you are interested in more detailed information and have any questions, please refer to the [paper](https://arxiv.org/abs/2412.12974) and [official implementation](https://github.com/Anonym0u3/AttentiveEraser).
|
||||
|
||||
#### Key features
|
||||
|
||||
- **Tuning-Free**: No additional training is required, making it easy to integrate and use.
|
||||
- **Flexible Mask Support**: Works with different types of masks for targeted object removal.
|
||||
- **High-Quality Results**: Utilizes the inherent generative power of diffusion models for realistic content completion.
|
||||
|
||||
#### Usage example
|
||||
To use the Stable Diffusion XL Attentive Eraser Pipeline, you can initialize it as follows:
|
||||
```py
|
||||
import torch
|
||||
from diffusers import DDIMScheduler, DiffusionPipeline
|
||||
from diffusers.utils import load_image
|
||||
import torch.nn.functional as F
|
||||
from torchvision.transforms.functional import to_tensor, gaussian_blur
|
||||
|
||||
dtype = torch.float16
|
||||
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
||||
|
||||
scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
|
||||
pipeline = DiffusionPipeline.from_pretrained(
|
||||
"stabilityai/stable-diffusion-xl-base-1.0",
|
||||
custom_pipeline="pipeline_stable_diffusion_xl_attentive_eraser",
|
||||
scheduler=scheduler,
|
||||
variant="fp16",
|
||||
use_safetensors=True,
|
||||
torch_dtype=dtype,
|
||||
).to(device)
|
||||
|
||||
|
||||
def preprocess_image(image_path, device):
|
||||
image = to_tensor((load_image(image_path)))
|
||||
image = image.unsqueeze_(0).float() * 2 - 1 # [0,1] --> [-1,1]
|
||||
if image.shape[1] != 3:
|
||||
image = image.expand(-1, 3, -1, -1)
|
||||
image = F.interpolate(image, (1024, 1024))
|
||||
image = image.to(dtype).to(device)
|
||||
return image
|
||||
|
||||
def preprocess_mask(mask_path, device):
|
||||
mask = to_tensor((load_image(mask_path, convert_method=lambda img: img.convert('L'))))
|
||||
mask = mask.unsqueeze_(0).float() # 0 or 1
|
||||
mask = F.interpolate(mask, (1024, 1024))
|
||||
mask = gaussian_blur(mask, kernel_size=(77, 77))
|
||||
mask[mask < 0.1] = 0
|
||||
mask[mask >= 0.1] = 1
|
||||
mask = mask.to(dtype).to(device)
|
||||
return mask
|
||||
|
||||
prompt = "" # Set prompt to null
|
||||
seed=123
|
||||
generator = torch.Generator(device=device).manual_seed(seed)
|
||||
source_image_path = "https://raw.githubusercontent.com/Anonym0u3/Images/refs/heads/main/an1024.png"
|
||||
mask_path = "https://raw.githubusercontent.com/Anonym0u3/Images/refs/heads/main/an1024_mask.png"
|
||||
source_image = preprocess_image(source_image_path, device)
|
||||
mask = preprocess_mask(mask_path, device)
|
||||
|
||||
image = pipeline(
|
||||
prompt=prompt,
|
||||
image=source_image,
|
||||
mask_image=mask,
|
||||
height=1024,
|
||||
width=1024,
|
||||
AAS=True, # enable AAS
|
||||
strength=0.8, # inpainting strength
|
||||
rm_guidance_scale=9, # removal guidance scale
|
||||
ss_steps = 9, # similarity suppression steps
|
||||
ss_scale = 0.3, # similarity suppression scale
|
||||
AAS_start_step=0, # AAS start step
|
||||
AAS_start_layer=34, # AAS start layer
|
||||
AAS_end_layer=70, # AAS end layer
|
||||
num_inference_steps=50, # number of inference steps # AAS_end_step = int(strength*num_inference_steps)
|
||||
generator=generator,
|
||||
guidance_scale=1,
|
||||
).images[0]
|
||||
image.save('./removed_img.png')
|
||||
print("Object removal completed")
|
||||
```
|
||||
|
||||
| Source Image | Mask | Output |
|
||||
| ---------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- |
|
||||
|  |  |  |
|
||||
|
||||
# Perturbed-Attention Guidance
|
||||
|
||||
[Project](https://ku-cvlab.github.io/Perturbed-Attention-Guidance/) / [arXiv](https://arxiv.org/abs/2403.17377) / [GitHub](https://github.com/KU-CVLAB/Perturbed-Attention-Guidance)
|
||||
|
||||
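A minimal usage sketch follows; the Hub pipeline id `hyoungwoncho/sd_perturbed_attention_guidance` and the `pag_scale` / `pag_applied_layers_index` arguments are assumptions and may differ from the shipped pipeline:

```python
import torch
from diffusers import StableDiffusionPipeline

# Assumed Hub-hosted custom pipeline id and PAG-specific arguments
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="hyoungwoncho/sd_perturbed_attention_guidance",
    torch_dtype=torch.float16,
).to("cuda")

prompt = "a corgi wearing sunglasses, studio photo"
image = pipe(
    prompt,
    num_inference_steps=50,
    guidance_scale=7.5,               # classifier-free guidance
    pag_scale=3.0,                    # perturbed-attention guidance strength (assumed argument name)
    pag_applied_layers_index=["m0"],  # which self-attention layers to perturb (assumed argument name)
).images[0]
image.save("pag_corgi.png")
```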
@@ -80,6 +80,7 @@ from diffusers.utils import (
|
||||
USE_PEFT_BACKEND,
|
||||
BaseOutput,
|
||||
deprecate,
|
||||
is_torch_version,
|
||||
is_torch_xla_available,
|
||||
logging,
|
||||
replace_example_docstring,
|
||||
@@ -868,7 +869,23 @@ class CrossAttnDownBlock2D(nn.Module):
|
||||
|
||||
for i, (resnet, attn) in enumerate(blocks):
|
||||
if torch.is_grad_enabled() and self.gradient_checkpointing:
|
||||
hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb)
|
||||
|
||||
def create_custom_forward(module, return_dict=None):
|
||||
def custom_forward(*inputs):
|
||||
if return_dict is not None:
|
||||
return module(*inputs, return_dict=return_dict)
|
||||
else:
|
||||
return module(*inputs)
|
||||
|
||||
return custom_forward
|
||||
|
||||
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
|
||||
hidden_states = torch.utils.checkpoint.checkpoint(
|
||||
create_custom_forward(resnet),
|
||||
hidden_states,
|
||||
temb,
|
||||
**ckpt_kwargs,
|
||||
)
|
||||
hidden_states = attn(
|
||||
hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states,
|
||||
@@ -1013,6 +1030,17 @@ class UNetMidBlock2DCrossAttn(nn.Module):
|
||||
hidden_states = self.resnets[0](hidden_states, temb)
|
||||
for attn, resnet in zip(self.attentions, self.resnets[1:]):
|
||||
if torch.is_grad_enabled() and self.gradient_checkpointing:
|
||||
|
||||
def create_custom_forward(module, return_dict=None):
|
||||
def custom_forward(*inputs):
|
||||
if return_dict is not None:
|
||||
return module(*inputs, return_dict=return_dict)
|
||||
else:
|
||||
return module(*inputs)
|
||||
|
||||
return custom_forward
|
||||
|
||||
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
|
||||
hidden_states = attn(
|
||||
hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states,
|
||||
@@ -1021,7 +1049,12 @@ class UNetMidBlock2DCrossAttn(nn.Module):
|
||||
encoder_attention_mask=encoder_attention_mask,
|
||||
return_dict=False,
|
||||
)[0]
|
||||
hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb)
|
||||
hidden_states = torch.utils.checkpoint.checkpoint(
|
||||
create_custom_forward(resnet),
|
||||
hidden_states,
|
||||
temb,
|
||||
**ckpt_kwargs,
|
||||
)
|
||||
else:
|
||||
hidden_states = attn(
|
||||
hidden_states,
|
||||
@@ -1159,7 +1192,23 @@ class CrossAttnUpBlock2D(nn.Module):
|
||||
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
|
||||
|
||||
if torch.is_grad_enabled() and self.gradient_checkpointing:
|
||||
hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb)
|
||||
|
||||
def create_custom_forward(module, return_dict=None):
|
||||
def custom_forward(*inputs):
|
||||
if return_dict is not None:
|
||||
return module(*inputs, return_dict=return_dict)
|
||||
else:
|
||||
return module(*inputs)
|
||||
|
||||
return custom_forward
|
||||
|
||||
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
|
||||
hidden_states = torch.utils.checkpoint.checkpoint(
|
||||
create_custom_forward(resnet),
|
||||
hidden_states,
|
||||
temb,
|
||||
**ckpt_kwargs,
|
||||
)
|
||||
hidden_states = attn(
|
||||
hidden_states,
|
||||
encoder_hidden_states=encoder_hidden_states,
|
||||
@@ -1233,6 +1282,10 @@ class MatryoshkaTransformer2DModel(LegacyModelMixin, LegacyConfigMixin):
|
||||
]
|
||||
)
|
||||
|
||||
def _set_gradient_checkpointing(self, module, value=False):
|
||||
if hasattr(module, "gradient_checkpointing"):
|
||||
module.gradient_checkpointing = value
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states: torch.Tensor,
|
||||
@@ -1312,8 +1365,19 @@ class MatryoshkaTransformer2DModel(LegacyModelMixin, LegacyConfigMixin):
|
||||
# Blocks
|
||||
for block in self.transformer_blocks:
|
||||
if torch.is_grad_enabled() and self.gradient_checkpointing:
|
||||
hidden_states = self._gradient_checkpointing_func(
|
||||
block,
|
||||
|
||||
def create_custom_forward(module, return_dict=None):
|
||||
def custom_forward(*inputs):
|
||||
if return_dict is not None:
|
||||
return module(*inputs, return_dict=return_dict)
|
||||
else:
|
||||
return module(*inputs)
|
||||
|
||||
return custom_forward
|
||||
|
||||
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
|
||||
hidden_states = torch.utils.checkpoint.checkpoint(
|
||||
create_custom_forward(block),
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
encoder_hidden_states,
|
||||
@@ -1321,6 +1385,7 @@ class MatryoshkaTransformer2DModel(LegacyModelMixin, LegacyConfigMixin):
|
||||
timestep,
|
||||
cross_attention_kwargs,
|
||||
class_labels,
|
||||
**ckpt_kwargs,
|
||||
)
|
||||
else:
|
||||
hidden_states = block(
|
||||
@@ -2659,6 +2724,10 @@ class MatryoshkaUNet2DConditionModel(
|
||||
for module in self.children():
|
||||
fn_recursive_set_attention_slice(module, reversed_slice_size)
|
||||
|
||||
def _set_gradient_checkpointing(self, module, value=False):
|
||||
if hasattr(module, "gradient_checkpointing"):
|
||||
module.gradient_checkpointing = value
|
||||
|
||||
def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
|
||||
r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
|
||||
|
||||
|
||||
@@ -87,7 +87,7 @@ def calculate_shift(
base_seq_len: int = 256,
max_seq_len: int = 4096,
base_shift: float = 0.5,
max_shift: float = 1.15,
max_shift: float = 1.16,
):
m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
b = base_shift - m * base_seq_len
@@ -878,7 +878,7 @@ class FluxDifferentialImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin):
self.scheduler.config.get("base_image_seq_len", 256),
self.scheduler.config.get("max_image_seq_len", 4096),
self.scheduler.config.get("base_shift", 0.5),
self.scheduler.config.get("max_shift", 1.15),
self.scheduler.config.get("max_shift", 1.16),
)
timesteps, num_inference_steps = retrieve_timesteps(
self.scheduler,

@@ -94,7 +94,7 @@ def calculate_shift(
base_seq_len: int = 256,
max_seq_len: int = 4096,
base_shift: float = 0.5,
max_shift: float = 1.15,
max_shift: float = 1.16,
):
m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
b = base_shift - m * base_seq_len
@@ -823,7 +823,7 @@ class RFInversionFluxPipeline(
self.scheduler.config.get("base_image_seq_len", 256),
self.scheduler.config.get("max_image_seq_len", 4096),
self.scheduler.config.get("base_shift", 0.5),
self.scheduler.config.get("max_shift", 1.15),
self.scheduler.config.get("max_shift", 1.16),
)
timesteps, num_inference_steps = retrieve_timesteps(
self.scheduler,
@@ -993,7 +993,7 @@ class RFInversionFluxPipeline(
self.scheduler.config.get("base_image_seq_len", 256),
self.scheduler.config.get("max_image_seq_len", 4096),
self.scheduler.config.get("base_shift", 0.5),
self.scheduler.config.get("max_shift", 1.15),
self.scheduler.config.get("max_shift", 1.16),
)
timesteps, num_inversion_steps = retrieve_timesteps(
self.scheduler,

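For context on these hunks: `calculate_shift` linearly maps the image-token sequence length onto a timestep-shift value between `base_shift` and `max_shift`, and the only functional change above is the default upper bound (1.15 vs. 1.16). Below is a self-contained sketch of that interpolation, reconstructed from the two body lines visible in the hunks; the return statement is an assumption about the remainder of the function.

```python
def calculate_shift(
    image_seq_len: int,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.16,
) -> float:
    # Straight line through (base_seq_len, base_shift) and (max_seq_len, max_shift).
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    return image_seq_len * m + b


# A 4096-token latent grid lands exactly on the upper bound:
assert abs(calculate_shift(4096) - 1.16) < 1e-9
```
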
@@ -70,7 +70,7 @@ def calculate_shift(
base_seq_len: int = 256,
max_seq_len: int = 4096,
base_shift: float = 0.5,
max_shift: float = 1.15,
max_shift: float = 1.16,
):
m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
b = base_shift - m * base_seq_len
@@ -759,7 +759,7 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi
self.scheduler.config.get("base_image_seq_len", 256),
self.scheduler.config.get("max_image_seq_len", 4096),
self.scheduler.config.get("base_shift", 0.5),
self.scheduler.config.get("max_shift", 1.15),
self.scheduler.config.get("max_shift", 1.16),
)
timesteps, num_inference_steps = retrieve_timesteps(
self.scheduler,

@@ -193,8 +193,7 @@ class StableDiffusionXLControlNetReferencePipeline(StableDiffusionXLControlNetPi

def prepare_ref_latents(self, refimage, batch_size, dtype, device, generator, do_classifier_free_guidance):
refimage = refimage.to(device=device)
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
if needs_upcasting:
if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
self.upcast_vae()
refimage = refimage.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
if refimage.dtype != self.vae.dtype:
@@ -224,11 +223,6 @@ class StableDiffusionXLControlNetReferencePipeline(StableDiffusionXLControlNetPi

# aligning device to prevent device errors when concating it with the latent model input
ref_image_latents = ref_image_latents.to(device=device, dtype=dtype)

# cast back to fp16 if needed
if needs_upcasting:
self.vae.to(dtype=torch.float16)

return ref_image_latents

def prepare_ref_image(

@@ -139,8 +139,7 @@ def retrieve_timesteps(
class StableDiffusionXLReferencePipeline(StableDiffusionXLPipeline):
def prepare_ref_latents(self, refimage, batch_size, dtype, device, generator, do_classifier_free_guidance):
refimage = refimage.to(device=device)
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
if needs_upcasting:
if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
self.upcast_vae()
refimage = refimage.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
if refimage.dtype != self.vae.dtype:
@@ -170,11 +169,6 @@ class StableDiffusionXLReferencePipeline(StableDiffusionXLPipeline):

# aligning device to prevent device errors when concating it with the latent model input
ref_image_latents = ref_image_latents.to(device=device, dtype=dtype)

# cast back to fp16 if needed
if needs_upcasting:
self.vae.to(dtype=torch.float16)

return ref_image_latents

def prepare_ref_image(

@@ -1143,7 +1143,7 @@ def main(args):
if global_step >= args.max_train_steps:
break

# Create the pipeline using the trained modules and save it.
# Create the pipeline using using the trained modules and save it.
accelerator.wait_for_everyone()
if accelerator.is_main_process:
controlnet = unwrap_model(controlnet)

@@ -742,29 +742,3 @@ accelerate launch train_dreambooth.py \
## Stable Diffusion XL

We support fine-tuning of the UNet shipped in [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) with DreamBooth and LoRA via the `train_dreambooth_lora_sdxl.py` script. Please refer to the docs [here](./README_sdxl.md).

## Dataset

We support 🤗 [Datasets](https://huggingface.co/docs/datasets/index); you can find a dataset on the [Hugging Face Hub](https://huggingface.co/datasets) or use your own.

The quickest way to get started with your custom dataset is 🤗 Datasets' [`ImageFolder`](https://huggingface.co/docs/datasets/image_dataset#imagefolder).

We need to create a file `metadata.jsonl` in the directory with our images:

```
{"file_name": "01.jpg", "prompt": "prompt 01"}
{"file_name": "02.jpg", "prompt": "prompt 02"}
```

If we have a directory with image-text pairs, e.g. `01.jpg` and `01.txt`, then `convert_to_imagefolder.py` can create `metadata.jsonl`:

```sh
python convert_to_imagefolder.py --path my_dataset/
```

We then pass `--dataset_name` and `--caption_column` to the training scripts:

```
--dataset_name=my_dataset/
--caption_column=prompt
```

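To sanity-check the resulting layout before launching training, the short sketch below loads the folder with 🤗 Datasets' `ImageFolder` loader; the directory name and caption column simply mirror the flags above and are illustrative rather than prescribed by this README.

```python
# Sketch: verify that metadata.jsonl and the images form a valid ImageFolder dataset.
from datasets import load_dataset

dataset = load_dataset("imagefolder", data_dir="my_dataset/", split="train")
print(dataset)               # expect an `image` column plus the caption column
print(dataset[0]["prompt"])  # caption read from metadata.jsonl
```
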
@@ -1,127 +0,0 @@
# DreamBooth training example for Lumina2

[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text2image models like Stable Diffusion given just a few (3~5) images of a subject.

The `train_dreambooth_lora_lumina2.py` script shows how to implement the training procedure with [LoRA](https://huggingface.co/docs/peft/conceptual_guides/adapter#low-rank-adaptation-lora) and adapt it for [Lumina2](https://huggingface.co/docs/diffusers/main/en/api/pipelines/lumina2).

This will also allow us to push the trained model parameters to the Hugging Face Hub platform.

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then cd into the `examples/dreambooth` folder and run
```bash
pip install -r requirements_sana.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or for a default accelerate configuration without answering questions about your environment:

```bash
accelerate config default
```

Or, if your environment doesn't support an interactive shell (e.g., a notebook):

```python
from accelerate.utils import write_basic_config
write_basic_config()
```

When running `accelerate config`, if we specify torch compile mode to True, there can be dramatic speedups.
Note also that we use the PEFT library as the backend for LoRA training, so make sure to have `peft>=0.14.0` installed in your environment.


### Dog toy example

Now let's get our dataset. For this example we will use some dog images: https://huggingface.co/datasets/diffusers/dog-example.

Let's first download it locally:

```python
from huggingface_hub import snapshot_download

local_dir = "./dog"
snapshot_download(
    "diffusers/dog-example",
    local_dir=local_dir, repo_type="dataset",
    ignore_patterns=".gitattributes",
)
```

This will also allow us to push the trained LoRA parameters to the Hugging Face Hub platform.

Now, we can launch training using:

```bash
export MODEL_NAME="Alpha-VLLM/Lumina-Image-2.0"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="trained-lumina2-lora"

accelerate launch train_dreambooth_lora_lumina2.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --mixed_precision="bf16" \
  --instance_prompt="a photo of sks dog" \
  --resolution=1024 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --use_8bit_adam \
  --learning_rate=1e-4 \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --validation_epochs=25 \
  --seed="0" \
  --push_to_hub
```

For using `push_to_hub`, make sure you're logged into your Hugging Face account:

```bash
huggingface-cli login
```

To better track our training experiments, we're using the following flags in the command above:

* `report_to="wandb"` will ensure the training runs are tracked on [Weights and Biases](https://wandb.ai/site). To use it, be sure to install `wandb` with `pip install wandb`. Don't forget to call `wandb login <your_api_key>` before training if you haven't done it before.
* `validation_prompt` and `validation_epochs` allow the script to do a few validation inference runs, which lets us qualitatively check whether the training is progressing as expected.

## Notes

Additionally, we welcome you to explore the following CLI arguments:

* `--lora_layers`: The transformer modules to apply LoRA training on. Please specify the layers as a comma-separated string, e.g. "to_k,to_q,to_v", which will result in LoRA training of the attention layers only.
* `--system_prompt`: A custom system prompt to provide additional personality to the model.
* `--max_sequence_length`: Maximum sequence length to use for text embeddings.


We provide several options for memory optimization:

* `--offload`: When enabled, we will offload the text encoder and VAE to the CPU when they are not used.
* `--cache_latents`: When enabled, we will pre-compute the latents from the input images with the VAE and remove the VAE from memory once done.
* `--use_8bit_adam`: When enabled, we will use the 8-bit version of AdamW provided by the `bitsandbytes` library.

Refer to the [official documentation](https://huggingface.co/docs/diffusers/main/en/api/pipelines/lumina2) of the `LuminaPipeline` to know more about the model.

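As a follow-up to the training command above, here is a minimal inference sketch for loading the resulting LoRA. The pipeline class name `Lumina2Pipeline` and the call arguments are assumptions based on current diffusers conventions; check the Lumina2 documentation linked above for the exact API.

```python
# Sketch: run inference with the DreamBooth LoRA trained above.
# `Lumina2Pipeline` and its call arguments are assumed from current
# diffusers conventions; adjust to match your installed version.
import torch
from diffusers import Lumina2Pipeline

pipe = Lumina2Pipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16
).to("cuda")
pipe.load_lora_weights("trained-lumina2-lora")  # local output dir or your Hub repo id

image = pipe(
    "A photo of sks dog in a bucket",
    num_inference_steps=30,
    guidance_scale=4.0,
).images[0]
image.save("sks_dog.png")
```
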
@@ -1,32 +0,0 @@
import argparse
import json
import pathlib


parser = argparse.ArgumentParser()
parser.add_argument(
    "--path",
    type=str,
    required=True,
    help="Path to folder with image-text pairs.",
)
parser.add_argument("--caption_column", type=str, default="prompt", help="Name of caption column.")
args = parser.parse_args()

path = pathlib.Path(args.path)
if not path.exists():
    raise RuntimeError(f"`--path` '{args.path}' does not exist.")

# Pair every `*.txt` caption file with the image that shares its stem.
all_files = list(path.glob("*"))
captions = list(path.glob("*.txt"))
images = set(all_files) - set(captions)
images = {image.stem: image for image in images}
caption_image = {caption: images.get(caption.stem) for caption in captions if images.get(caption.stem)}

metadata = path.joinpath("metadata.jsonl")

# Write one JSON line per image-caption pair, as expected by 🤗 Datasets' ImageFolder.
with metadata.open("w", encoding="utf-8") as f:
    for caption, image in caption_image.items():
        caption_text = caption.read_text(encoding="utf-8")
        json.dump({"file_name": image.name, args.caption_column: caption_text}, f)
        f.write("\n")

@@ -1,206 +0,0 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2024 HuggingFace Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
import safetensors
|
||||
|
||||
|
||||
sys.path.append("..")
|
||||
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
|
||||
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
logger = logging.getLogger()
|
||||
stream_handler = logging.StreamHandler(sys.stdout)
|
||||
logger.addHandler(stream_handler)
|
||||
|
||||
|
||||
class DreamBoothLoRAlumina2(ExamplesTestsAccelerate):
|
||||
instance_data_dir = "docs/source/en/imgs"
|
||||
pretrained_model_name_or_path = "hf-internal-testing/tiny-lumina2-pipe"
|
||||
script_path = "examples/dreambooth/train_dreambooth_lora_lumina2.py"
|
||||
transformer_layer_type = "layers.0.attn.to_k"
|
||||
|
||||
def test_dreambooth_lora_lumina2(self):
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
test_args = f"""
|
||||
{self.script_path}
|
||||
--pretrained_model_name_or_path {self.pretrained_model_name_or_path}
|
||||
--instance_data_dir {self.instance_data_dir}
|
||||
--resolution 32
|
||||
--train_batch_size 1
|
||||
--gradient_accumulation_steps 1
|
||||
--max_train_steps 2
|
||||
--learning_rate 5.0e-04
|
||||
--scale_lr
|
||||
--lr_scheduler constant
|
||||
--lr_warmup_steps 0
|
||||
--output_dir {tmpdir}
|
||||
--max_sequence_length 16
|
||||
""".split()
|
||||
|
||||
test_args.extend(["--instance_prompt", ""])
|
||||
run_command(self._launch_args + test_args)
|
||||
# save_pretrained smoke test
|
||||
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
|
||||
|
||||
# make sure the state_dict has the correct naming in the parameters.
|
||||
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
|
||||
is_lora = all("lora" in k for k in lora_state_dict.keys())
|
||||
self.assertTrue(is_lora)
|
||||
|
||||
# when not training the text encoder, all the parameters in the state dict should start
|
||||
# with `"transformer"` in their names.
|
||||
starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys())
|
||||
self.assertTrue(starts_with_transformer)
|
||||
|
||||
def test_dreambooth_lora_latent_caching(self):
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
test_args = f"""
|
||||
{self.script_path}
|
||||
--pretrained_model_name_or_path {self.pretrained_model_name_or_path}
|
||||
--instance_data_dir {self.instance_data_dir}
|
||||
--resolution 32
|
||||
--train_batch_size 1
|
||||
--gradient_accumulation_steps 1
|
||||
--max_train_steps 2
|
||||
--cache_latents
|
||||
--learning_rate 5.0e-04
|
||||
--scale_lr
|
||||
--lr_scheduler constant
|
||||
--lr_warmup_steps 0
|
||||
--output_dir {tmpdir}
|
||||
--max_sequence_length 16
|
||||
""".split()
|
||||
|
||||
test_args.extend(["--instance_prompt", ""])
|
||||
run_command(self._launch_args + test_args)
|
||||
# save_pretrained smoke test
|
||||
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
|
||||
|
||||
# make sure the state_dict has the correct naming in the parameters.
|
||||
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
|
||||
is_lora = all("lora" in k for k in lora_state_dict.keys())
|
||||
self.assertTrue(is_lora)
|
||||
|
||||
# when not training the text encoder, all the parameters in the state dict should start
|
||||
# with `"transformer"` in their names.
|
||||
starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys())
|
||||
self.assertTrue(starts_with_transformer)
|
||||
|
||||
def test_dreambooth_lora_layers(self):
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
test_args = f"""
|
||||
{self.script_path}
|
||||
--pretrained_model_name_or_path {self.pretrained_model_name_or_path}
|
||||
--instance_data_dir {self.instance_data_dir}
|
||||
--resolution 32
|
||||
--train_batch_size 1
|
||||
--gradient_accumulation_steps 1
|
||||
--max_train_steps 2
|
||||
--cache_latents
|
||||
--learning_rate 5.0e-04
|
||||
--scale_lr
|
||||
--lora_layers {self.transformer_layer_type}
|
||||
--lr_scheduler constant
|
||||
--lr_warmup_steps 0
|
||||
--output_dir {tmpdir}
|
||||
--max_sequence_length 16
|
||||
""".split()
|
||||
|
||||
test_args.extend(["--instance_prompt", ""])
|
||||
run_command(self._launch_args + test_args)
|
||||
# save_pretrained smoke test
|
||||
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
|
||||
|
||||
# make sure the state_dict has the correct naming in the parameters.
|
||||
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
|
||||
is_lora = all("lora" in k for k in lora_state_dict.keys())
|
||||
self.assertTrue(is_lora)
|
||||
|
||||
# when not training the text encoder, all the parameters in the state dict should start
|
||||
# with `"transformer"` in their names. In this test, we only params of
|
||||
# `self.transformer_layer_type` should be in the state dict.
|
||||
starts_with_transformer = all(self.transformer_layer_type in key for key in lora_state_dict)
|
||||
self.assertTrue(starts_with_transformer)
|
||||
|
||||
def test_dreambooth_lora_lumina2_checkpointing_checkpoints_total_limit(self):
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
test_args = f"""
|
||||
{self.script_path}
|
||||
--pretrained_model_name_or_path={self.pretrained_model_name_or_path}
|
||||
--instance_data_dir={self.instance_data_dir}
|
||||
--output_dir={tmpdir}
|
||||
--resolution=32
|
||||
--train_batch_size=1
|
||||
--gradient_accumulation_steps=1
|
||||
--max_train_steps=6
|
||||
--checkpoints_total_limit=2
|
||||
--checkpointing_steps=2
|
||||
--max_sequence_length 16
|
||||
""".split()
|
||||
|
||||
test_args.extend(["--instance_prompt", ""])
|
||||
run_command(self._launch_args + test_args)
|
||||
|
||||
self.assertEqual(
|
||||
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
|
||||
{"checkpoint-4", "checkpoint-6"},
|
||||
)
|
||||
|
||||
def test_dreambooth_lora_lumina2_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
test_args = f"""
|
||||
{self.script_path}
|
||||
--pretrained_model_name_or_path={self.pretrained_model_name_or_path}
|
||||
--instance_data_dir={self.instance_data_dir}
|
||||
--output_dir={tmpdir}
|
||||
--resolution=32
|
||||
--train_batch_size=1
|
||||
--gradient_accumulation_steps=1
|
||||
--max_train_steps=4
|
||||
--checkpointing_steps=2
|
||||
--max_sequence_length 166
|
||||
""".split()
|
||||
|
||||
test_args.extend(["--instance_prompt", ""])
|
||||
run_command(self._launch_args + test_args)
|
||||
|
||||
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"})
|
||||
|
||||
resume_run_args = f"""
|
||||
{self.script_path}
|
||||
--pretrained_model_name_or_path={self.pretrained_model_name_or_path}
|
||||
--instance_data_dir={self.instance_data_dir}
|
||||
--output_dir={tmpdir}
|
||||
--resolution=32
|
||||
--train_batch_size=1
|
||||
--gradient_accumulation_steps=1
|
||||
--max_train_steps=8
|
||||
--checkpointing_steps=2
|
||||
--resume_from_checkpoint=checkpoint-4
|
||||
--checkpoints_total_limit=2
|
||||
--max_sequence_length 16
|
||||
""".split()
|
||||
|
||||
resume_run_args.extend(["--instance_prompt", ""])
|
||||
run_command(self._launch_args + resume_run_args)
|
||||
|
||||
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"})
|
||||
@@ -63,7 +63,6 @@ from diffusers.utils import (
|
||||
is_wandb_available,
|
||||
)
|
||||
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
|
||||
from diffusers.utils.import_utils import is_torch_npu_available
|
||||
from diffusers.utils.torch_utils import is_compiled_module
|
||||
|
||||
|
||||
@@ -75,9 +74,6 @@ check_min_version("0.33.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
if is_torch_npu_available():
|
||||
torch.npu.config.allow_internal_format = False
|
||||
|
||||
|
||||
def save_model_card(
|
||||
repo_id: str,
|
||||
@@ -605,7 +601,6 @@ def parse_args(input_args=None):
|
||||
)
|
||||
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
|
||||
parser.add_argument("--enable_vae_tiling", action="store_true", help="Enabla vae tiling in log validation")
|
||||
parser.add_argument("--enable_npu_flash_attention", action="store_true", help="Enabla Flash Attention for NPU")
|
||||
|
||||
if input_args is not None:
|
||||
args = parser.parse_args(input_args)
|
||||
@@ -929,7 +924,8 @@ def main(args):
|
||||
image.save(image_filename)
|
||||
|
||||
del pipeline
|
||||
free_memory()
|
||||
if torch.cuda.is_available():
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
# Handle the repository creation
|
||||
if accelerator.is_main_process:
|
||||
@@ -992,14 +988,6 @@ def main(args):
|
||||
# because Gemma2 is particularly suited for bfloat16.
|
||||
text_encoder.to(dtype=torch.bfloat16)
|
||||
|
||||
if args.enable_npu_flash_attention:
|
||||
if is_torch_npu_available():
|
||||
logger.info("npu flash attention enabled.")
|
||||
for block in transformer.transformer_blocks:
|
||||
block.attn2.set_use_npu_flash_attention(True)
|
||||
else:
|
||||
raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu device ")
|
||||
|
||||
# Initialize a text encoding pipeline and keep it to CPU for now.
|
||||
text_encoding_pipeline = SanaPipeline.from_pretrained(
|
||||
args.pretrained_model_name_or_path,
|
||||
|
||||
@@ -695,7 +695,7 @@ def main():
|
||||
)
|
||||
# We need to ensure that the original and the edited images undergo the same
|
||||
# augmentation transforms.
|
||||
images = np.stack([original_images, edited_images])
|
||||
images = np.concatenate([original_images, edited_images])
|
||||
images = torch.tensor(images)
|
||||
images = 2 * (images / 255) - 1
|
||||
return train_transforms(images)
|
||||
@@ -706,7 +706,7 @@ def main():
|
||||
# Since the original and edited images were concatenated before
|
||||
# applying the transformations, we need to separate them and reshape
|
||||
# them accordingly.
|
||||
original_images, edited_images = preprocessed_images
|
||||
original_images, edited_images = preprocessed_images.chunk(2)
|
||||
original_images = original_images.reshape(-1, 3, args.resolution, args.resolution)
|
||||
edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution)
|
||||
|
||||
|
||||
@@ -766,7 +766,7 @@ def main():
|
||||
)
|
||||
# We need to ensure that the original and the edited images undergo the same
|
||||
# augmentation transforms.
|
||||
images = np.stack([original_images, edited_images])
|
||||
images = np.concatenate([original_images, edited_images])
|
||||
images = torch.tensor(images)
|
||||
images = 2 * (images / 255) - 1
|
||||
return train_transforms(images)
|
||||
@@ -906,7 +906,7 @@ def main():
|
||||
# Since the original and edited images were concatenated before
|
||||
# applying the transformations, we need to separate them and reshape
|
||||
# them accordingly.
|
||||
original_images, edited_images = preprocessed_images
|
||||
original_images, edited_images = preprocessed_images.chunk(2)
|
||||
original_images = original_images.reshape(-1, 3, args.resolution, args.resolution)
|
||||
edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution)
|
||||
|
||||
|
||||
@@ -82,11 +82,31 @@ pipeline = EasyPipelineForInpainting.from_huggingface(
## Search Civitai and Huggingface

```python
# Load Lora into the pipeline.
pipeline.auto_load_lora_weights("Detail Tweaker")
from pipeline_easy import (
    search_huggingface,
    search_civitai,
)

# Search Lora
Lora = search_civitai(
    "Keyword_to_search_Lora",
    model_type="LORA",
    base_model = "SD 1.5",
    download=True,
)
# Load Lora into the pipeline.
pipeline.load_lora_weights(Lora)


# Search TextualInversion
TextualInversion = search_civitai(
    "EasyNegative",
    model_type="TextualInversion",
    base_model = "SD 1.5",
    download=True
)
# Load TextualInversion into the pipeline.
pipeline.auto_load_textual_inversion("EasyNegative", token="EasyNegative")
pipeline.load_textual_inversion(TextualInversion, token="EasyNegative")
```

### Search Civitai

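Since the snippet above only exercises `search_civitai`, here is a complementary sketch for `search_huggingface`. The keyword arguments and result fields are assumptions inferred from the helper's source shown further down (`checkpoint_format`, `download`, `include_params`, `SearchResult.model_path`, `model_status.download_url`), so verify them against `pipeline_easy.py` before relying on this.

```python
# Sketch: look up a checkpoint on the Hugging Face Hub with the companion helper.
# Keyword arguments are assumed to mirror search_civitai; check pipeline_easy.py.
from pipeline_easy import search_huggingface

result = search_huggingface(
    "stable-diffusion-v1-5",
    checkpoint_format="diffusers",  # "diffusers" or "all" appear in the source; others assumed
    download=True,
    include_params=True,
)
print(result.model_path)                 # local path (or repo id) of the resolved model
print(result.model_status.download_url)  # where the checkpoint was fetched from
```
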
@@ -1,5 +1,5 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2025 suzukimain
|
||||
# Copyright 2024 suzukimain
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -15,13 +15,11 @@
|
||||
|
||||
import os
|
||||
import re
|
||||
import types
|
||||
from collections import OrderedDict
|
||||
from dataclasses import asdict, dataclass, field
|
||||
from typing import Dict, List, Optional, Union
|
||||
from dataclasses import asdict, dataclass
|
||||
from typing import Union
|
||||
|
||||
import requests
|
||||
import torch
|
||||
from huggingface_hub import hf_api, hf_hub_download
|
||||
from huggingface_hub.file_download import http_get
|
||||
from huggingface_hub.utils import validate_hf_hub_args
|
||||
@@ -32,7 +30,6 @@ from diffusers.loaders.single_file_utils import (
|
||||
infer_diffusers_model_type,
|
||||
load_single_file_checkpoint,
|
||||
)
|
||||
from diffusers.pipelines.animatediff import AnimateDiffPipeline, AnimateDiffSDXLPipeline
|
||||
from diffusers.pipelines.auto_pipeline import (
|
||||
AutoPipelineForImage2Image,
|
||||
AutoPipelineForInpainting,
|
||||
@@ -42,18 +39,13 @@ from diffusers.pipelines.controlnet import (
|
||||
StableDiffusionControlNetImg2ImgPipeline,
|
||||
StableDiffusionControlNetInpaintPipeline,
|
||||
StableDiffusionControlNetPipeline,
|
||||
StableDiffusionXLControlNetImg2ImgPipeline,
|
||||
StableDiffusionXLControlNetPipeline,
|
||||
)
|
||||
from diffusers.pipelines.flux import FluxImg2ImgPipeline, FluxPipeline
|
||||
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
||||
from diffusers.pipelines.stable_diffusion import (
|
||||
StableDiffusionImg2ImgPipeline,
|
||||
StableDiffusionInpaintPipeline,
|
||||
StableDiffusionPipeline,
|
||||
StableDiffusionUpscalePipeline,
|
||||
)
|
||||
from diffusers.pipelines.stable_diffusion_3 import StableDiffusion3Img2ImgPipeline, StableDiffusion3Pipeline
|
||||
from diffusers.pipelines.stable_diffusion_xl import (
|
||||
StableDiffusionXLImg2ImgPipeline,
|
||||
StableDiffusionXLInpaintPipeline,
|
||||
@@ -67,133 +59,46 @@ logger = logging.get_logger(__name__)
|
||||
|
||||
SINGLE_FILE_CHECKPOINT_TEXT2IMAGE_PIPELINE_MAPPING = OrderedDict(
|
||||
[
|
||||
("animatediff_rgb", AnimateDiffPipeline),
|
||||
("animatediff_scribble", AnimateDiffPipeline),
|
||||
("animatediff_sdxl_beta", AnimateDiffSDXLPipeline),
|
||||
("animatediff_v1", AnimateDiffPipeline),
|
||||
("animatediff_v2", AnimateDiffPipeline),
|
||||
("animatediff_v3", AnimateDiffPipeline),
|
||||
("autoencoder-dc-f128c512", None),
|
||||
("autoencoder-dc-f32c32", None),
|
||||
("autoencoder-dc-f32c32-sana", None),
|
||||
("autoencoder-dc-f64c128", None),
|
||||
("controlnet", StableDiffusionControlNetPipeline),
|
||||
("controlnet_xl", StableDiffusionXLControlNetPipeline),
|
||||
("controlnet_xl_large", StableDiffusionXLControlNetPipeline),
|
||||
("controlnet_xl_mid", StableDiffusionXLControlNetPipeline),
|
||||
("controlnet_xl_small", StableDiffusionXLControlNetPipeline),
|
||||
("flux-depth", FluxPipeline),
|
||||
("flux-dev", FluxPipeline),
|
||||
("flux-fill", FluxPipeline),
|
||||
("flux-schnell", FluxPipeline),
|
||||
("hunyuan-video", None),
|
||||
("xl_base", StableDiffusionXLPipeline),
|
||||
("xl_refiner", StableDiffusionXLPipeline),
|
||||
("xl_inpaint", None),
|
||||
("playground-v2-5", StableDiffusionXLPipeline),
|
||||
("upscale", None),
|
||||
("inpainting", None),
|
||||
("inpainting_v2", None),
|
||||
("ltx-video", None),
|
||||
("ltx-video-0.9.1", None),
|
||||
("mochi-1-preview", None),
|
||||
("playground-v2-5", StableDiffusionXLPipeline),
|
||||
("sd3", StableDiffusion3Pipeline),
|
||||
("sd35_large", StableDiffusion3Pipeline),
|
||||
("sd35_medium", StableDiffusion3Pipeline),
|
||||
("stable_cascade_stage_b", None),
|
||||
("stable_cascade_stage_b_lite", None),
|
||||
("stable_cascade_stage_c", None),
|
||||
("stable_cascade_stage_c_lite", None),
|
||||
("upscale", StableDiffusionUpscalePipeline),
|
||||
("v1", StableDiffusionPipeline),
|
||||
("controlnet", StableDiffusionControlNetPipeline),
|
||||
("v2", StableDiffusionPipeline),
|
||||
("xl_base", StableDiffusionXLPipeline),
|
||||
("xl_inpaint", None),
|
||||
("xl_refiner", StableDiffusionXLPipeline),
|
||||
("v1", StableDiffusionPipeline),
|
||||
]
|
||||
)
|
||||
|
||||
SINGLE_FILE_CHECKPOINT_IMAGE2IMAGE_PIPELINE_MAPPING = OrderedDict(
|
||||
[
|
||||
("animatediff_rgb", AnimateDiffPipeline),
|
||||
("animatediff_scribble", AnimateDiffPipeline),
|
||||
("animatediff_sdxl_beta", AnimateDiffSDXLPipeline),
|
||||
("animatediff_v1", AnimateDiffPipeline),
|
||||
("animatediff_v2", AnimateDiffPipeline),
|
||||
("animatediff_v3", AnimateDiffPipeline),
|
||||
("autoencoder-dc-f128c512", None),
|
||||
("autoencoder-dc-f32c32", None),
|
||||
("autoencoder-dc-f32c32-sana", None),
|
||||
("autoencoder-dc-f64c128", None),
|
||||
("controlnet", StableDiffusionControlNetImg2ImgPipeline),
|
||||
("controlnet_xl", StableDiffusionXLControlNetImg2ImgPipeline),
|
||||
("controlnet_xl_large", StableDiffusionXLControlNetImg2ImgPipeline),
|
||||
("controlnet_xl_mid", StableDiffusionXLControlNetImg2ImgPipeline),
|
||||
("controlnet_xl_small", StableDiffusionXLControlNetImg2ImgPipeline),
|
||||
("flux-depth", FluxImg2ImgPipeline),
|
||||
("flux-dev", FluxImg2ImgPipeline),
|
||||
("flux-fill", FluxImg2ImgPipeline),
|
||||
("flux-schnell", FluxImg2ImgPipeline),
|
||||
("hunyuan-video", None),
|
||||
("xl_base", StableDiffusionXLImg2ImgPipeline),
|
||||
("xl_refiner", StableDiffusionXLImg2ImgPipeline),
|
||||
("xl_inpaint", None),
|
||||
("playground-v2-5", StableDiffusionXLImg2ImgPipeline),
|
||||
("upscale", None),
|
||||
("inpainting", None),
|
||||
("inpainting_v2", None),
|
||||
("ltx-video", None),
|
||||
("ltx-video-0.9.1", None),
|
||||
("mochi-1-preview", None),
|
||||
("playground-v2-5", StableDiffusionXLImg2ImgPipeline),
|
||||
("sd3", StableDiffusion3Img2ImgPipeline),
|
||||
("sd35_large", StableDiffusion3Img2ImgPipeline),
|
||||
("sd35_medium", StableDiffusion3Img2ImgPipeline),
|
||||
("stable_cascade_stage_b", None),
|
||||
("stable_cascade_stage_b_lite", None),
|
||||
("stable_cascade_stage_c", None),
|
||||
("stable_cascade_stage_c_lite", None),
|
||||
("upscale", StableDiffusionUpscalePipeline),
|
||||
("v1", StableDiffusionImg2ImgPipeline),
|
||||
("controlnet", StableDiffusionControlNetImg2ImgPipeline),
|
||||
("v2", StableDiffusionImg2ImgPipeline),
|
||||
("xl_base", StableDiffusionXLImg2ImgPipeline),
|
||||
("xl_inpaint", None),
|
||||
("xl_refiner", StableDiffusionXLImg2ImgPipeline),
|
||||
("v1", StableDiffusionImg2ImgPipeline),
|
||||
]
|
||||
)
|
||||
|
||||
SINGLE_FILE_CHECKPOINT_INPAINT_PIPELINE_MAPPING = OrderedDict(
|
||||
[
|
||||
("animatediff_rgb", None),
|
||||
("animatediff_scribble", None),
|
||||
("animatediff_sdxl_beta", None),
|
||||
("animatediff_v1", None),
|
||||
("animatediff_v2", None),
|
||||
("animatediff_v3", None),
|
||||
("autoencoder-dc-f128c512", None),
|
||||
("autoencoder-dc-f32c32", None),
|
||||
("autoencoder-dc-f32c32-sana", None),
|
||||
("autoencoder-dc-f64c128", None),
|
||||
("controlnet", StableDiffusionControlNetInpaintPipeline),
|
||||
("controlnet_xl", None),
|
||||
("controlnet_xl_large", None),
|
||||
("controlnet_xl_mid", None),
|
||||
("controlnet_xl_small", None),
|
||||
("flux-depth", None),
|
||||
("flux-dev", None),
|
||||
("flux-fill", None),
|
||||
("flux-schnell", None),
|
||||
("hunyuan-video", None),
|
||||
("xl_base", None),
|
||||
("xl_refiner", None),
|
||||
("xl_inpaint", StableDiffusionXLInpaintPipeline),
|
||||
("playground-v2-5", None),
|
||||
("upscale", None),
|
||||
("inpainting", StableDiffusionInpaintPipeline),
|
||||
("inpainting_v2", StableDiffusionInpaintPipeline),
|
||||
("ltx-video", None),
|
||||
("ltx-video-0.9.1", None),
|
||||
("mochi-1-preview", None),
|
||||
("playground-v2-5", None),
|
||||
("sd3", None),
|
||||
("sd35_large", None),
|
||||
("sd35_medium", None),
|
||||
("stable_cascade_stage_b", None),
|
||||
("stable_cascade_stage_b_lite", None),
|
||||
("stable_cascade_stage_c", None),
|
||||
("stable_cascade_stage_c_lite", None),
|
||||
("upscale", StableDiffusionUpscalePipeline),
|
||||
("v1", None),
|
||||
("controlnet", StableDiffusionControlNetInpaintPipeline),
|
||||
("v2", None),
|
||||
("xl_base", None),
|
||||
("xl_inpaint", StableDiffusionXLInpaintPipeline),
|
||||
("xl_refiner", None),
|
||||
("v1", None),
|
||||
]
|
||||
)
|
||||
|
||||
@@ -211,33 +116,14 @@ CONFIG_FILE_LIST = [
|
||||
"diffusion_pytorch_model.non_ema.safetensors",
|
||||
]
|
||||
|
||||
DIFFUSERS_CONFIG_DIR = [
|
||||
"safety_checker",
|
||||
"unet",
|
||||
"vae",
|
||||
"text_encoder",
|
||||
"text_encoder_2",
|
||||
DIFFUSERS_CONFIG_DIR = ["safety_checker", "unet", "vae", "text_encoder", "text_encoder_2"]
|
||||
|
||||
INPAINT_PIPELINE_KEYS = [
|
||||
"xl_inpaint",
|
||||
"inpainting",
|
||||
"inpainting_v2",
|
||||
]
|
||||
|
||||
TOKENIZER_SHAPE_MAP = {
|
||||
768: [
|
||||
"SD 1.4",
|
||||
"SD 1.5",
|
||||
"SD 1.5 LCM",
|
||||
"SDXL 0.9",
|
||||
"SDXL 1.0",
|
||||
"SDXL 1.0 LCM",
|
||||
"SDXL Distilled",
|
||||
"SDXL Turbo",
|
||||
"SDXL Lightning",
|
||||
"PixArt a",
|
||||
"Playground v2",
|
||||
"Pony",
|
||||
],
|
||||
1024: ["SD 2.0", "SD 2.0 768", "SD 2.1", "SD 2.1 768", "SD 2.1 Unclip"],
|
||||
}
|
||||
|
||||
|
||||
EXTENSION = [".safetensors", ".ckpt", ".bin"]
|
||||
|
||||
CACHE_HOME = os.path.expanduser("~/.cache")
|
||||
@@ -276,28 +162,12 @@ class ModelStatus:
|
||||
The name of the model file.
|
||||
local (`bool`):
|
||||
Whether the model exists locally
|
||||
site_url (`str`):
|
||||
The URL of the site where the model is hosted.
|
||||
"""
|
||||
|
||||
search_word: str = ""
|
||||
download_url: str = ""
|
||||
file_name: str = ""
|
||||
local: bool = False
|
||||
site_url: str = ""
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExtraStatus:
|
||||
r"""
|
||||
Data class for storing extra status information.
|
||||
|
||||
Attributes:
|
||||
trained_words (`str`):
|
||||
The words used to trigger the model
|
||||
"""
|
||||
|
||||
trained_words: Union[List[str], None] = None
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -321,9 +191,8 @@ class SearchResult:
|
||||
model_path: str = ""
|
||||
loading_method: Union[str, None] = None
|
||||
checkpoint_format: Union[str, None] = None
|
||||
repo_status: RepoStatus = field(default_factory=RepoStatus)
|
||||
model_status: ModelStatus = field(default_factory=ModelStatus)
|
||||
extra_status: ExtraStatus = field(default_factory=ExtraStatus)
|
||||
repo_status: RepoStatus = RepoStatus()
|
||||
model_status: ModelStatus = ModelStatus()
|
||||
|
||||
|
||||
@validate_hf_hub_args
|
||||
@@ -516,7 +385,6 @@ def file_downloader(
|
||||
proxies = kwargs.pop("proxies", None)
|
||||
force_download = kwargs.pop("force_download", False)
|
||||
displayed_filename = kwargs.pop("displayed_filename", None)
|
||||
|
||||
# Default mode for file writing and initial file size
|
||||
mode = "wb"
|
||||
file_size = 0
|
||||
@@ -528,7 +396,7 @@ def file_downloader(
|
||||
if os.path.exists(save_path):
|
||||
if not force_download:
|
||||
# If the file exists and force_download is False, skip the download
|
||||
logger.info(f"File already exists: {save_path}, skipping download.")
|
||||
logger.warning(f"File already exists: {save_path}, skipping download.")
|
||||
return None
|
||||
elif resume:
|
||||
# If resuming, set mode to append binary and get current file size
|
||||
@@ -589,18 +457,10 @@ def search_huggingface(search_word: str, **kwargs) -> Union[str, SearchResult, N
|
||||
gated = kwargs.pop("gated", False)
|
||||
skip_error = kwargs.pop("skip_error", False)
|
||||
|
||||
file_list = []
|
||||
hf_repo_info = {}
|
||||
hf_security_info = {}
|
||||
model_path = ""
|
||||
repo_id, file_name = "", ""
|
||||
diffusers_model_exists = False
|
||||
|
||||
# Get the type and loading method for the keyword
|
||||
search_word_status = get_keyword_types(search_word)
|
||||
|
||||
if search_word_status["type"]["hf_repo"]:
|
||||
hf_repo_info = hf_api.model_info(repo_id=search_word, securityStatus=True)
|
||||
if download:
|
||||
model_path = DiffusionPipeline.download(
|
||||
search_word,
|
||||
@@ -643,6 +503,13 @@ def search_huggingface(search_word: str, **kwargs) -> Union[str, SearchResult, N
|
||||
)
|
||||
model_dicts = [asdict(value) for value in list(hf_models)]
|
||||
|
||||
file_list = []
|
||||
hf_repo_info = {}
|
||||
hf_security_info = {}
|
||||
model_path = ""
|
||||
repo_id, file_name = "", ""
|
||||
diffusers_model_exists = False
|
||||
|
||||
# Loop through models to find a suitable candidate
|
||||
for repo_info in model_dicts:
|
||||
repo_id = repo_info["id"]
|
||||
@@ -656,10 +523,7 @@ def search_huggingface(search_word: str, **kwargs) -> Union[str, SearchResult, N
|
||||
if hf_security_info["scansDone"]:
|
||||
for info in repo_info["siblings"]:
|
||||
file_path = info["rfilename"]
|
||||
if "model_index.json" == file_path and checkpoint_format in [
|
||||
"diffusers",
|
||||
"all",
|
||||
]:
|
||||
if "model_index.json" == file_path and checkpoint_format in ["diffusers", "all"]:
|
||||
diffusers_model_exists = True
|
||||
break
|
||||
|
||||
@@ -707,10 +571,6 @@ def search_huggingface(search_word: str, **kwargs) -> Union[str, SearchResult, N
|
||||
force_download=force_download,
|
||||
)
|
||||
|
||||
# `pathlib.PosixPath` may be returned
|
||||
if model_path:
|
||||
model_path = str(model_path)
|
||||
|
||||
if file_name:
|
||||
download_url = f"https://huggingface.co/{repo_id}/blob/main/{file_name}"
|
||||
else:
|
||||
@@ -726,12 +586,10 @@ def search_huggingface(search_word: str, **kwargs) -> Union[str, SearchResult, N
|
||||
repo_status=RepoStatus(repo_id=repo_id, repo_hash=hf_repo_info.sha, version=revision),
|
||||
model_status=ModelStatus(
|
||||
search_word=search_word,
|
||||
site_url=download_url,
|
||||
download_url=download_url,
|
||||
file_name=file_name,
|
||||
local=download,
|
||||
),
|
||||
extra_status=ExtraStatus(trained_words=None),
|
||||
)
|
||||
|
||||
else:
|
||||
@@ -747,8 +605,6 @@ def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]
|
||||
The search query string.
|
||||
model_type (`str`, *optional*, defaults to `Checkpoint`):
|
||||
The type of model to search for.
|
||||
sort (`str`, *optional*):
|
||||
The order in which you wish to sort the results(for example, `Highest Rated`, `Most Downloaded`, `Newest`).
|
||||
base_model (`str`, *optional*):
|
||||
The base model to filter by.
|
||||
download (`bool`, *optional*, defaults to `False`):
|
||||
@@ -772,7 +628,6 @@ def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]
|
||||
|
||||
# Extract additional parameters from kwargs
|
||||
model_type = kwargs.pop("model_type", "Checkpoint")
|
||||
sort = kwargs.pop("sort", None)
|
||||
download = kwargs.pop("download", False)
|
||||
base_model = kwargs.pop("base_model", None)
|
||||
force_download = kwargs.pop("force_download", False)
|
||||
@@ -787,7 +642,6 @@ def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]
|
||||
repo_name = ""
|
||||
repo_id = ""
|
||||
version_id = ""
|
||||
trainedWords = ""
|
||||
models_list = []
|
||||
selected_repo = {}
|
||||
selected_model = {}
|
||||
@@ -798,16 +652,12 @@ def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]
|
||||
params = {
|
||||
"query": search_word,
|
||||
"types": model_type,
|
||||
"sort": "Most Downloaded",
|
||||
"limit": 20,
|
||||
}
|
||||
if base_model is not None:
|
||||
if not isinstance(base_model, list):
|
||||
base_model = [base_model]
|
||||
params["baseModel"] = base_model
|
||||
|
||||
if sort is not None:
|
||||
params["sort"] = sort
|
||||
|
||||
headers = {}
|
||||
if token:
|
||||
headers["Authorization"] = f"Bearer {token}"
|
||||
@@ -836,30 +686,25 @@ def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]
|
||||
|
||||
# Sort versions within the selected repo by download count
|
||||
sorted_versions = sorted(
|
||||
selected_repo["modelVersions"],
|
||||
key=lambda x: x["stats"]["downloadCount"],
|
||||
reverse=True,
|
||||
selected_repo["modelVersions"], key=lambda x: x["stats"]["downloadCount"], reverse=True
|
||||
)
|
||||
for selected_version in sorted_versions:
|
||||
version_id = selected_version["id"]
|
||||
trainedWords = selected_version["trainedWords"]
|
||||
models_list = []
|
||||
# When searching for textual inversion, results other than the values entered for the base model may come up, so check again.
|
||||
if base_model is None or selected_version["baseModel"] in base_model:
|
||||
for model_data in selected_version["files"]:
|
||||
# Check if the file passes security scans and has a valid extension
|
||||
file_name = model_data["name"]
|
||||
if (
|
||||
model_data["pickleScanResult"] == "Success"
|
||||
and model_data["virusScanResult"] == "Success"
|
||||
and any(file_name.endswith(ext) for ext in EXTENSION)
|
||||
and os.path.basename(os.path.dirname(file_name)) not in DIFFUSERS_CONFIG_DIR
|
||||
):
|
||||
file_status = {
|
||||
"filename": file_name,
|
||||
"download_url": model_data["downloadUrl"],
|
||||
}
|
||||
models_list.append(file_status)
|
||||
for model_data in selected_version["files"]:
|
||||
# Check if the file passes security scans and has a valid extension
|
||||
file_name = model_data["name"]
|
||||
if (
|
||||
model_data["pickleScanResult"] == "Success"
|
||||
and model_data["virusScanResult"] == "Success"
|
||||
and any(file_name.endswith(ext) for ext in EXTENSION)
|
||||
and os.path.basename(os.path.dirname(file_name)) not in DIFFUSERS_CONFIG_DIR
|
||||
):
|
||||
file_status = {
|
||||
"filename": file_name,
|
||||
"download_url": model_data["downloadUrl"],
|
||||
}
|
||||
models_list.append(file_status)
|
||||
|
||||
if models_list:
|
||||
# Sort the models list by filename and find the safest model
|
||||
@@ -919,229 +764,19 @@ def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]
|
||||
repo_status=RepoStatus(repo_id=repo_name, repo_hash=repo_id, version=version_id),
|
||||
model_status=ModelStatus(
|
||||
search_word=search_word,
|
||||
site_url=f"https://civitai.com/models/{repo_id}?modelVersionId={version_id}",
|
||||
download_url=download_url,
|
||||
file_name=file_name,
|
||||
local=output_info["type"]["local"],
|
||||
),
|
||||
extra_status=ExtraStatus(trained_words=trainedWords or None),
|
||||
)
|
||||
|
||||
|
||||
def add_methods(pipeline):
|
||||
r"""
|
||||
Add methods from `AutoConfig` to the pipeline.
|
||||
|
||||
Parameters:
|
||||
pipeline (`Pipeline`):
|
||||
The pipeline to which the methods will be added.
|
||||
"""
|
||||
for attr_name in dir(AutoConfig):
|
||||
attr_value = getattr(AutoConfig, attr_name)
|
||||
if callable(attr_value) and not attr_name.startswith("__"):
|
||||
setattr(pipeline, attr_name, types.MethodType(attr_value, pipeline))
|
||||
return pipeline
|
||||
|
||||
|
||||
class AutoConfig:
|
||||
def auto_load_textual_inversion(
|
||||
self,
|
||||
pretrained_model_name_or_path: Union[str, List[str]],
|
||||
token: Optional[Union[str, List[str]]] = None,
|
||||
base_model: Optional[Union[str, List[str]]] = None,
|
||||
tokenizer=None,
|
||||
text_encoder=None,
|
||||
**kwargs,
|
||||
):
|
||||
r"""
|
||||
Load Textual Inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both 🤗 Diffusers and
|
||||
Automatic1111 formats are supported).
|
||||
|
||||
Parameters:
|
||||
pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`):
|
||||
Can be either one of the following or a list of them:
|
||||
|
||||
- Search keywords for pretrained model (for example `EasyNegative`).
|
||||
- A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a
|
||||
pretrained model hosted on the Hub.
|
||||
- A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual
|
||||
inversion weights.
|
||||
- A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights.
|
||||
- A [torch state
|
||||
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
|
||||
|
||||
token (`str` or `List[str]`, *optional*):
|
||||
Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a
|
||||
list, then `token` must also be a list of equal length.
|
||||
text_encoder ([`~transformers.CLIPTextModel`], *optional*):
|
||||
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
||||
If not specified, function will take self.tokenizer.
|
||||
tokenizer ([`~transformers.CLIPTokenizer`], *optional*):
|
||||
A `CLIPTokenizer` to tokenize text. If not specified, function will take self.tokenizer.
|
||||
weight_name (`str`, *optional*):
|
||||
Name of a custom weight file. This should be used when:
|
||||
|
||||
- The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight
|
||||
name such as `text_inv.bin`.
|
||||
- The saved textual inversion file is in the Automatic1111 format.
|
||||
cache_dir (`Union[str, os.PathLike]`, *optional*):
|
||||
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
|
||||
is not used.
|
||||
force_download (`bool`, *optional*, defaults to `False`):
|
||||
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
|
||||
cached versions if they exist.
|
||||
|
||||
proxies (`Dict[str, str]`, *optional*):
|
||||
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
|
||||
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
|
||||
local_files_only (`bool`, *optional*, defaults to `False`):
|
||||
Whether to only load local model weights and configuration files or not. If set to `True`, the model
|
||||
won't be downloaded from the Hub.
|
||||
token (`str` or *bool*, *optional*):
|
||||
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
|
||||
`diffusers-cli login` (stored in `~/.huggingface`) is used.
|
||||
revision (`str`, *optional*, defaults to `"main"`):
|
||||
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
|
||||
allowed by Git.
|
||||
subfolder (`str`, *optional*, defaults to `""`):
|
||||
The subfolder location of a model file within a larger model repository on the Hub or locally.
|
||||
mirror (`str`, *optional*):
|
||||
Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
|
||||
guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
|
||||
information.
|
||||
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from auto_diffusers import EasyPipelineForText2Image
|
||||
|
||||
>>> pipeline = EasyPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
|
||||
>>> pipeline.auto_load_textual_inversion("EasyNegative", token="EasyNegative")
|
||||
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
```
|
||||
|
||||
"""
|
||||
# 1. Set tokenizer and text encoder
|
||||
tokenizer = tokenizer or getattr(self, "tokenizer", None)
|
||||
text_encoder = text_encoder or getattr(self, "text_encoder", None)
|
||||
|
||||
# Check if tokenizer and text encoder are provided
|
||||
if tokenizer is None or text_encoder is None:
|
||||
raise ValueError("Tokenizer and text encoder must be provided.")
|
||||
|
||||
# 2. Normalize inputs
|
||||
pretrained_model_name_or_paths = (
|
||||
[pretrained_model_name_or_path]
|
||||
if not isinstance(pretrained_model_name_or_path, list)
|
||||
else pretrained_model_name_or_path
|
||||
)
|
||||
|
||||
# 2.1 Normalize tokens
|
||||
tokens = [token] if not isinstance(token, list) else token
|
||||
if tokens[0] is None:
|
||||
tokens = tokens * len(pretrained_model_name_or_paths)
|
||||
|
||||
for check_token in tokens:
|
||||
# Check if token is already in tokenizer vocabulary
|
||||
if check_token in tokenizer.get_vocab():
|
||||
raise ValueError(
|
||||
f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder."
|
||||
)
|
||||
|
||||
expected_shape = text_encoder.get_input_embeddings().weight.shape[-1] # Expected shape of tokenizer
|
||||
|
||||
for search_word in pretrained_model_name_or_paths:
|
||||
if isinstance(search_word, str):
|
||||
# Update kwargs to ensure the model is downloaded and parameters are included
|
||||
_status = {
|
||||
"download": True,
|
||||
"include_params": True,
|
||||
"skip_error": False,
|
||||
"model_type": "TextualInversion",
|
||||
}
|
||||
# Get tags for the base model of textual inversion compatible with tokenizer.
|
||||
# If the tokenizer is 768-dimensional, set tags for SD 1.x and SDXL.
|
||||
# If the tokenizer is 1024-dimensional, set tags for SD 2.x.
|
||||
if expected_shape in TOKENIZER_SHAPE_MAP:
|
||||
# Retrieve the appropriate tags from the TOKENIZER_SHAPE_MAP based on the expected shape
|
||||
tags = TOKENIZER_SHAPE_MAP[expected_shape]
|
||||
if base_model is not None:
|
||||
if isinstance(base_model, list):
|
||||
tags.extend(base_model)
|
||||
else:
|
||||
tags.append(base_model)
|
||||
_status["base_model"] = tags
|
||||
|
||||
kwargs.update(_status)
|
||||
# Search for the model on Civitai and get the model status
|
||||
textual_inversion_path = search_civitai(search_word, **kwargs)
|
||||
logger.warning(
|
||||
f"textual_inversion_path: {search_word} -> {textual_inversion_path.model_status.site_url}"
|
||||
)
|
||||
|
||||
pretrained_model_name_or_paths[
|
||||
pretrained_model_name_or_paths.index(search_word)
|
||||
] = textual_inversion_path.model_path
|
||||
|
||||
self.load_textual_inversion(
|
||||
pretrained_model_name_or_paths, token=tokens, tokenizer=tokenizer, text_encoder=text_encoder, **kwargs
|
||||
)
|
||||
|
||||
def auto_load_lora_weights(
|
||||
self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs
|
||||
):
|
||||
r"""
|
||||
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
|
||||
`self.text_encoder`.
|
||||
|
||||
All kwargs are forwarded to `self.lora_state_dict`.
|
||||
|
||||
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is
|
||||
loaded.
|
||||
|
||||
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is
|
||||
loaded into `self.unet`.
|
||||
|
||||
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state
|
||||
dict is loaded into `self.text_encoder`.
|
||||
|
||||
Parameters:
|
||||
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
|
||||
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
|
||||
adapter_name (`str`, *optional*):
|
||||
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
|
||||
`default_{i}` where i is the total number of adapters being loaded.
|
||||
low_cpu_mem_usage (`bool`, *optional*):
|
||||
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
|
||||
weights.
|
||||
kwargs (`dict`, *optional*):
|
||||
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
|
||||
"""
|
||||
if isinstance(pretrained_model_name_or_path_or_dict, str):
|
||||
# Update kwargs to ensure the model is downloaded and parameters are included
|
||||
_status = {
|
||||
"download": True,
|
||||
"include_params": True,
|
||||
"skip_error": False,
|
||||
"model_type": "LORA",
|
||||
}
|
||||
kwargs.update(_status)
|
||||
# Search for the model on Civitai and get the model status
|
||||
lora_path = search_civitai(pretrained_model_name_or_path_or_dict, **kwargs)
|
||||
logger.warning(f"lora_path: {lora_path.model_status.site_url}")
|
||||
logger.warning(f"trained_words: {lora_path.extra_status.trained_words}")
|
||||
pretrained_model_name_or_path_or_dict = lora_path.model_path
|
||||
|
||||
self.load_lora_weights(pretrained_model_name_or_path_or_dict, adapter_name=adapter_name, **kwargs)
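# Illustrative usage sketch (added for exposition; not part of the original method). The
# Civitai search words below are hypothetical placeholders:
#
#   pipeline = EasyPipelineForText2Image.from_civitai("dreamshaper")
#   pipeline.auto_load_lora_weights("add-detail-lora")
#   image = pipeline("a cat wearing a top hat").images[0]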
|
||||
|
||||
|
||||
class EasyPipelineForText2Image(AutoPipelineForText2Image):
|
||||
r"""
|
||||
[`EasyPipelineForText2Image`] is a generic pipeline class that instantiates a text-to-image pipeline class. The
|
||||
|
||||
[`AutoPipelineForText2Image`] is a generic pipeline class that instantiates a text-to-image pipeline class. The
|
||||
specific underlying pipeline class is automatically selected from either the
|
||||
[`~EasyPipelineForText2Image.from_pretrained`], [`~EasyPipelineForText2Image.from_pipe`], [`~EasyPipelineForText2Image.from_huggingface`] or [`~EasyPipelineForText2Image.from_civitai`] methods.
|
||||
[`~AutoPipelineForText2Image.from_pretrained`] or [`~AutoPipelineForText2Image.from_pipe`] methods.
|
||||
|
||||
This class cannot be instantiated using `__init__()` (throws an error).
|
||||
|
||||
@@ -1256,9 +891,9 @@ class EasyPipelineForText2Image(AutoPipelineForText2Image):
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from auto_diffusers import EasyPipelineForText2Image
|
||||
>>> from diffusers import AutoPipelineForText2Image
|
||||
|
||||
>>> pipeline = EasyPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> pipeline = AutoPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
```
|
||||
"""
|
||||
@@ -1272,21 +907,20 @@ class EasyPipelineForText2Image(AutoPipelineForText2Image):
|
||||
kwargs.update(_status)
|
||||
|
||||
# Search for the model on Hugging Face and get the model status
|
||||
hf_checkpoint_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {hf_checkpoint_status.model_status.download_url}")
|
||||
checkpoint_path = hf_checkpoint_status.model_path
|
||||
hf_model_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {hf_model_status.model_status.download_url}")
|
||||
checkpoint_path = hf_model_status.model_path
|
||||
|
||||
# Check the format of the model checkpoint
|
||||
if hf_checkpoint_status.loading_method == "from_single_file":
|
||||
if hf_model_status.checkpoint_format == "single_file":
|
||||
# Load the pipeline from a single file checkpoint
|
||||
pipeline = load_pipeline_from_single_file(
|
||||
return load_pipeline_from_single_file(
|
||||
pretrained_model_or_path=checkpoint_path,
|
||||
pipeline_mapping=SINGLE_FILE_CHECKPOINT_TEXT2IMAGE_PIPELINE_MAPPING,
|
||||
**kwargs,
|
||||
)
|
||||
else:
|
||||
pipeline = cls.from_pretrained(checkpoint_path, **kwargs)
|
||||
return add_methods(pipeline)
|
||||
return cls.from_pretrained(checkpoint_path, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def from_civitai(cls, pretrained_model_link_or_path, **kwargs):
|
||||
@@ -1365,9 +999,9 @@ class EasyPipelineForText2Image(AutoPipelineForText2Image):
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from auto_diffusers import EasyPipelineForText2Image
|
||||
>>> from diffusers import AutoPipelineForText2Image
|
||||
|
||||
>>> pipeline = EasyPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> pipeline = AutoPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
```
|
||||
"""
|
||||
@@ -1381,25 +1015,24 @@ class EasyPipelineForText2Image(AutoPipelineForText2Image):
|
||||
kwargs.update(_status)
|
||||
|
||||
# Search for the model on Civitai and get the model status
|
||||
checkpoint_status = search_civitai(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {checkpoint_status.model_status.site_url}")
|
||||
checkpoint_path = checkpoint_status.model_path
|
||||
model_status = search_civitai(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {model_status.model_status.download_url}")
|
||||
checkpoint_path = model_status.model_path
|
||||
|
||||
# Load the pipeline from a single file checkpoint
|
||||
pipeline = load_pipeline_from_single_file(
|
||||
return load_pipeline_from_single_file(
|
||||
pretrained_model_or_path=checkpoint_path,
|
||||
pipeline_mapping=SINGLE_FILE_CHECKPOINT_TEXT2IMAGE_PIPELINE_MAPPING,
|
||||
**kwargs,
|
||||
)
|
||||
return add_methods(pipeline)
|
||||
|
||||
|
||||
class EasyPipelineForImage2Image(AutoPipelineForImage2Image):
|
||||
r"""
|
||||
|
||||
[`EasyPipelineForImage2Image`] is a generic pipeline class that instantiates an image-to-image pipeline class. The
|
||||
[`AutoPipelineForImage2Image`] is a generic pipeline class that instantiates an image-to-image pipeline class. The
|
||||
specific underlying pipeline class is automatically selected from either the
|
||||
[`~EasyPipelineForImage2Image.from_pretrained`], [`~EasyPipelineForImage2Image.from_pipe`], [`~EasyPipelineForImage2Image.from_huggingface`] or [`~EasyPipelineForImage2Image.from_civitai`] methods.
|
||||
[`~AutoPipelineForImage2Image.from_pretrained`] or [`~AutoPipelineForImage2Image.from_pipe`] methods.
|
||||
|
||||
This class cannot be instantiated using `__init__()` (throws an error).
|
||||
|
||||
@@ -1514,10 +1147,10 @@ class EasyPipelineForImage2Image(AutoPipelineForImage2Image):
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from auto_diffusers import EasyPipelineForImage2Image
|
||||
>>> from diffusers import AutoPipelineForText2Image
|
||||
|
||||
>>> pipeline = EasyPipelineForImage2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt, image).images[0]
|
||||
>>> pipeline = AutoPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
```
|
||||
"""
|
||||
# Update kwargs to ensure the model is downloaded and parameters are included
|
||||
@@ -1530,22 +1163,20 @@ class EasyPipelineForImage2Image(AutoPipelineForImage2Image):
|
||||
kwargs.update(_parmas)
|
||||
|
||||
# Search for the model on Hugging Face and get the model status
|
||||
hf_checkpoint_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {hf_checkpoint_status.model_status.download_url}")
|
||||
checkpoint_path = hf_checkpoint_status.model_path
|
||||
model_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {model_status.model_status.download_url}")
|
||||
checkpoint_path = model_status.model_path
|
||||
|
||||
# Check the format of the model checkpoint
|
||||
if hf_checkpoint_status.loading_method == "from_single_file":
|
||||
if model_status.checkpoint_format == "single_file":
|
||||
# Load the pipeline from a single file checkpoint
|
||||
pipeline = load_pipeline_from_single_file(
|
||||
return load_pipeline_from_single_file(
|
||||
pretrained_model_or_path=checkpoint_path,
|
||||
pipeline_mapping=SINGLE_FILE_CHECKPOINT_IMAGE2IMAGE_PIPELINE_MAPPING,
|
||||
**kwargs,
|
||||
)
|
||||
else:
|
||||
pipeline = cls.from_pretrained(checkpoint_path, **kwargs)
|
||||
|
||||
return add_methods(pipeline)
|
||||
return cls.from_pretrained(checkpoint_path, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def from_civitai(cls, pretrained_model_link_or_path, **kwargs):
|
||||
@@ -1624,10 +1255,10 @@ class EasyPipelineForImage2Image(AutoPipelineForImage2Image):
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from auto_diffusers import EasyPipelineForImage2Image
|
||||
>>> from diffusers import AutoPipelineForText2Image
|
||||
|
||||
>>> pipeline = EasyPipelineForImage2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt, image).images[0]
|
||||
>>> pipeline = AutoPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
```
|
||||
"""
|
||||
# Update kwargs to ensure the model is downloaded and parameters are included
|
||||
@@ -1640,25 +1271,24 @@ class EasyPipelineForImage2Image(AutoPipelineForImage2Image):
|
||||
kwargs.update(_status)
|
||||
|
||||
# Search for the model on Civitai and get the model status
|
||||
checkpoint_status = search_civitai(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {checkpoint_status.model_status.site_url}")
|
||||
checkpoint_path = checkpoint_status.model_path
|
||||
model_status = search_civitai(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {model_status.model_status.download_url}")
|
||||
checkpoint_path = model_status.model_path
|
||||
|
||||
# Load the pipeline from a single file checkpoint
|
||||
pipeline = load_pipeline_from_single_file(
|
||||
return load_pipeline_from_single_file(
|
||||
pretrained_model_or_path=checkpoint_path,
|
||||
pipeline_mapping=SINGLE_FILE_CHECKPOINT_IMAGE2IMAGE_PIPELINE_MAPPING,
|
||||
**kwargs,
|
||||
)
|
||||
return add_methods(pipeline)
|
||||
|
||||
|
||||
class EasyPipelineForInpainting(AutoPipelineForInpainting):
|
||||
r"""
|
||||
|
||||
[`EasyPipelineForInpainting`] is a generic pipeline class that instantiates an inpainting pipeline class. The
|
||||
[`AutoPipelineForInpainting`] is a generic pipeline class that instantiates an inpainting pipeline class. The
|
||||
specific underlying pipeline class is automatically selected from either the
|
||||
[`~EasyPipelineForInpainting.from_pretrained`], [`~EasyPipelineForInpainting.from_pipe`], [`~EasyPipelineForInpainting.from_huggingface`] or [`~EasyPipelineForInpainting.from_civitai`] methods.
|
||||
[`~AutoPipelineForInpainting.from_pretrained`] or [`~AutoPipelineForInpainting.from_pipe`] methods.
|
||||
|
||||
This class cannot be instantiated using `__init__()` (throws an error).
|
||||
|
||||
@@ -1773,10 +1403,10 @@ class EasyPipelineForInpainting(AutoPipelineForInpainting):
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from auto_diffusers import EasyPipelineForInpainting
|
||||
>>> from diffusers import AutoPipelineForText2Image
|
||||
|
||||
>>> pipeline = EasyPipelineForInpainting.from_huggingface("stable-diffusion-2-inpainting")
|
||||
>>> image = pipeline(prompt, image=init_image, mask_image=mask_image).images[0]
|
||||
>>> pipeline = AutoPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
```
|
||||
"""
|
||||
# Update kwargs to ensure the model is downloaded and parameters are included
|
||||
@@ -1789,21 +1419,20 @@ class EasyPipelineForInpainting(AutoPipelineForInpainting):
|
||||
kwargs.update(_status)
|
||||
|
||||
# Search for the model on Hugging Face and get the model status
|
||||
hf_checkpoint_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {hf_checkpoint_status.model_status.download_url}")
|
||||
checkpoint_path = hf_checkpoint_status.model_path
|
||||
model_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {model_status.model_status.download_url}")
|
||||
checkpoint_path = model_status.model_path
|
||||
|
||||
# Check the format of the model checkpoint
|
||||
if hf_checkpoint_status.loading_method == "from_single_file":
|
||||
if model_status.checkpoint_format == "single_file":
|
||||
# Load the pipeline from a single file checkpoint
|
||||
pipeline = load_pipeline_from_single_file(
|
||||
return load_pipeline_from_single_file(
|
||||
pretrained_model_or_path=checkpoint_path,
|
||||
pipeline_mapping=SINGLE_FILE_CHECKPOINT_INPAINT_PIPELINE_MAPPING,
|
||||
**kwargs,
|
||||
)
|
||||
else:
|
||||
pipeline = cls.from_pretrained(checkpoint_path, **kwargs)
|
||||
return add_methods(pipeline)
|
||||
return cls.from_pretrained(checkpoint_path, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def from_civitai(cls, pretrained_model_link_or_path, **kwargs):
|
||||
@@ -1882,10 +1511,10 @@ class EasyPipelineForInpainting(AutoPipelineForInpainting):
|
||||
Examples:
|
||||
|
||||
```py
|
||||
>>> from auto_diffusers import EasyPipelineForInpainting
|
||||
>>> from diffusers import AutoPipelineForText2Image
|
||||
|
||||
>>> pipeline = EasyPipelineForInpainting.from_huggingface("stable-diffusion-2-inpainting")
|
||||
>>> image = pipeline(prompt, image=init_image, mask_image=mask_image).images[0]
|
||||
>>> pipeline = AutoPipelineForText2Image.from_huggingface("stable-diffusion-v1-5")
|
||||
>>> image = pipeline(prompt).images[0]
|
||||
```
|
||||
"""
|
||||
# Update kwargs to ensure the model is downloaded and parameters are included
|
||||
@@ -1898,14 +1527,13 @@ class EasyPipelineForInpainting(AutoPipelineForInpainting):
|
||||
kwargs.update(_status)
|
||||
|
||||
# Search for the model on Civitai and get the model status
|
||||
checkpoint_status = search_civitai(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {checkpoint_status.model_status.site_url}")
|
||||
checkpoint_path = checkpoint_status.model_path
|
||||
model_status = search_civitai(pretrained_model_link_or_path, **kwargs)
|
||||
logger.warning(f"checkpoint_path: {model_status.model_status.download_url}")
|
||||
checkpoint_path = model_status.model_path
|
||||
|
||||
# Load the pipeline from a single file checkpoint
|
||||
pipeline = load_pipeline_from_single_file(
|
||||
return load_pipeline_from_single_file(
|
||||
pretrained_model_or_path=checkpoint_path,
|
||||
pipeline_mapping=SINGLE_FILE_CHECKPOINT_INPAINT_PIPELINE_MAPPING,
|
||||
**kwargs,
|
||||
)
|
||||
return add_methods(pipeline)
|
||||
|
||||
@@ -1,59 +0,0 @@
# AutoencoderKL training example

## Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then cd into the example folder and run
```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

## Training on CIFAR10

Please replace the validation image with your own image.

```bash
accelerate launch train_autoencoderkl.py \
  --pretrained_model_name_or_path stabilityai/sd-vae-ft-mse \
  --dataset_name=cifar10 \
  --image_column=img \
  --validation_image images/bird.jpg images/car.jpg images/dog.jpg images/frog.jpg \
  --num_train_epochs 100 \
  --gradient_accumulation_steps 2 \
  --learning_rate 4.5e-6 \
  --lr_scheduler cosine \
  --report_to wandb
```

## Training on ImageNet

```bash
accelerate launch train_autoencoderkl.py \
  --pretrained_model_name_or_path stabilityai/sd-vae-ft-mse \
  --num_train_epochs 100 \
  --gradient_accumulation_steps 2 \
  --learning_rate 4.5e-6 \
  --lr_scheduler cosine \
  --report_to wandb \
  --mixed_precision bf16 \
  --train_data_dir /path/to/ImageNet/train \
  --validation_image ./image.png \
  --decoder_only
```
@@ -1,15 +0,0 @@
accelerate>=0.16.0
bitsandbytes
datasets
huggingface_hub
lpips
numpy
packaging
Pillow
taming_transformers
torch
torchvision
tqdm
transformers
wandb
xformers
@@ -8,6 +8,7 @@ from diffusers.models import PixArtTransformer2DModel
|
||||
from diffusers.models.attention import BasicTransformerBlock
|
||||
from diffusers.models.modeling_outputs import Transformer2DModelOutput
|
||||
from diffusers.models.modeling_utils import ModelMixin
|
||||
from diffusers.utils.torch_utils import is_torch_version
|
||||
|
||||
|
||||
class PixArtControlNetAdapterBlock(nn.Module):
|
||||
@@ -150,6 +151,10 @@ class PixArtControlNetTransformerModel(ModelMixin, ConfigMixin):
|
||||
self.transformer = transformer
|
||||
self.controlnet = controlnet
|
||||
|
||||
def _set_gradient_checkpointing(self, module, value=False):
|
||||
if hasattr(module, "gradient_checkpointing"):
|
||||
module.gradient_checkpointing = value
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states: torch.Tensor,
|
||||
@@ -215,8 +220,18 @@ class PixArtControlNetTransformerModel(ModelMixin, ConfigMixin):
|
||||
print("Gradient checkpointing is not supported for the controlnet transformer model, yet.")
|
||||
exit(1)
|
||||
|
||||
hidden_states = self._gradient_checkpointing_func(
|
||||
block,
|
||||
def create_custom_forward(module, return_dict=None):
|
||||
def custom_forward(*inputs):
|
||||
if return_dict is not None:
|
||||
return module(*inputs, return_dict=return_dict)
|
||||
else:
|
||||
return module(*inputs)
|
||||
|
||||
return custom_forward
|
||||
|
||||
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
|
||||
hidden_states = torch.utils.checkpoint.checkpoint(
|
||||
create_custom_forward(block),
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
encoder_hidden_states,
|
||||
@@ -224,6 +239,7 @@ class PixArtControlNetTransformerModel(ModelMixin, ConfigMixin):
|
||||
timestep,
|
||||
cross_attention_kwargs,
|
||||
None,
|
||||
**ckpt_kwargs,
|
||||
)
|
||||
else:
|
||||
# the control nets are only used for the blocks 1 to self.blocks_num
|
||||
|
||||
@@ -365,8 +365,8 @@ def parse_args():
|
||||
"--dream_training",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Use the DREAM training method, which makes training more efficient and accurate at the "
|
||||
"expense of doing an extra forward pass. See: https://arxiv.org/abs/2312.00210"
|
||||
"Use the DREAM training method, which makes training more efficient and accurate at the ",
|
||||
"expense of doing an extra forward pass. See: https://arxiv.org/abs/2312.00210",
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
|
||||
@@ -515,6 +515,10 @@ def main():
|
||||
elif accelerator.mixed_precision == "bf16":
|
||||
weight_dtype = torch.bfloat16
|
||||
|
||||
# Freeze the unet parameters before adding adapters
|
||||
for param in unet.parameters():
|
||||
param.requires_grad_(False)
|
||||
|
||||
unet_lora_config = LoraConfig(
|
||||
r=args.rank,
|
||||
lora_alpha=args.rank,
|
||||
|
||||
@@ -1,243 +0,0 @@
|
||||
"""
|
||||
Convert a CogView4 checkpoint from SAT(https://github.com/THUDM/SwissArmyTransformer) to the Diffusers format.
|
||||
(deprecated since 2025-02-07; it will be removed in a later CogView4 version)
|
||||
|
||||
This script converts a CogView4 checkpoint to the Diffusers format, which can then be used
|
||||
with the Diffusers library.
|
||||
|
||||
Example usage:
|
||||
python scripts/convert_cogview4_to_diffusers.py \
|
||||
--transformer_checkpoint_path 'your path/cogview4_6b/1/mp_rank_00_model_states.pt' \
|
||||
--vae_checkpoint_path 'your path/cogview4_6b/imagekl_ch16.pt' \
|
||||
--output_path "THUDM/CogView4-6B" \
|
||||
--dtype "bf16"
|
||||
|
||||
Arguments:
|
||||
--transformer_checkpoint_path: Path to Transformer state dict.
|
||||
--vae_checkpoint_path: Path to VAE state dict.
|
||||
--output_path: The path to save the converted model.
|
||||
--push_to_hub: Whether to push the converted checkpoint to the HF Hub or not. Defaults to `False`.
|
||||
--text_encoder_cache_dir: Cache directory where text encoder is located. Defaults to None, which means HF_HOME will be used
|
||||
--dtype: The dtype to save the model in (default: "bf16", options: "fp16", "bf16", "fp32"). If None, the dtype of the state dict is considered.
|
||||
|
||||
Default is "bf16" because CogView4 uses bfloat16 for Training.
|
||||
|
||||
Note: You must provide either --transformer_checkpoint_path or --vae_checkpoint_path.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
from contextlib import nullcontext
|
||||
|
||||
import torch
|
||||
from accelerate import init_empty_weights
|
||||
from transformers import GlmForCausalLM, PreTrainedTokenizerFast
|
||||
|
||||
from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler
|
||||
from diffusers.loaders.single_file_utils import convert_ldm_vae_checkpoint
|
||||
from diffusers.utils.import_utils import is_accelerate_available
|
||||
|
||||
|
||||
CTX = init_empty_weights if is_accelerate_available() else nullcontext
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--transformer_checkpoint_path", default=None, type=str)
|
||||
parser.add_argument("--vae_checkpoint_path", default=None, type=str)
|
||||
parser.add_argument("--output_path", required=True, type=str)
|
||||
parser.add_argument("--push_to_hub", action="store_true", default=False, help="Whether to push to HF Hub after saving")
|
||||
parser.add_argument("--text_encoder_cache_dir", type=str, default=None, help="Path to text encoder cache directory")
|
||||
parser.add_argument("--dtype", type=str, default="bf16")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
# this is specific to `AdaLayerNormContinuous`:
|
||||
# the diffusers implementation splits the linear projection into scale, shift while CogView4 splits it into shift, scale
|
||||
def swap_scale_shift(weight, dim):
|
||||
shift, scale = weight.chunk(2, dim=0)
|
||||
new_weight = torch.cat([scale, shift], dim=0)
|
||||
return new_weight
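# Illustrative sanity check (added for exposition; not part of the original script). With a
# toy 1-D weight laid out as (shift, scale), the swap yields (scale, shift):
#   >>> swap_scale_shift(torch.tensor([1.0, 2.0, 10.0, 20.0]), dim=0)
#   tensor([10., 20.,  1.,  2.])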
|
||||
|
||||
|
||||
def convert_cogview4_transformer_checkpoint_to_diffusers(ckpt_path):
|
||||
original_state_dict = torch.load(ckpt_path, map_location="cpu")
|
||||
original_state_dict = original_state_dict["module"]
|
||||
original_state_dict = {k.replace("model.diffusion_model.", ""): v for k, v in original_state_dict.items()}
|
||||
|
||||
new_state_dict = {}
|
||||
|
||||
# Convert patch_embed
|
||||
new_state_dict["patch_embed.proj.weight"] = original_state_dict.pop("mixins.patch_embed.proj.weight")
|
||||
new_state_dict["patch_embed.proj.bias"] = original_state_dict.pop("mixins.patch_embed.proj.bias")
|
||||
new_state_dict["patch_embed.text_proj.weight"] = original_state_dict.pop("mixins.patch_embed.text_proj.weight")
|
||||
new_state_dict["patch_embed.text_proj.bias"] = original_state_dict.pop("mixins.patch_embed.text_proj.bias")
|
||||
|
||||
# Convert time_condition_embed
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_1.weight"] = original_state_dict.pop(
|
||||
"time_embed.0.weight"
|
||||
)
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_1.bias"] = original_state_dict.pop(
|
||||
"time_embed.0.bias"
|
||||
)
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_2.weight"] = original_state_dict.pop(
|
||||
"time_embed.2.weight"
|
||||
)
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_2.bias"] = original_state_dict.pop(
|
||||
"time_embed.2.bias"
|
||||
)
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_1.weight"] = original_state_dict.pop(
|
||||
"label_emb.0.0.weight"
|
||||
)
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_1.bias"] = original_state_dict.pop(
|
||||
"label_emb.0.0.bias"
|
||||
)
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_2.weight"] = original_state_dict.pop(
|
||||
"label_emb.0.2.weight"
|
||||
)
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_2.bias"] = original_state_dict.pop(
|
||||
"label_emb.0.2.bias"
|
||||
)
|
||||
|
||||
# Convert transformer blocks, for cogview4 is 28 blocks
|
||||
for i in range(28):
|
||||
block_prefix = f"transformer_blocks.{i}."
|
||||
old_prefix = f"transformer.layers.{i}."
|
||||
adaln_prefix = f"mixins.adaln.adaln_modules.{i}."
|
||||
new_state_dict[block_prefix + "norm1.linear.weight"] = original_state_dict.pop(adaln_prefix + "1.weight")
|
||||
new_state_dict[block_prefix + "norm1.linear.bias"] = original_state_dict.pop(adaln_prefix + "1.bias")
|
||||
|
||||
qkv_weight = original_state_dict.pop(old_prefix + "attention.query_key_value.weight")
|
||||
qkv_bias = original_state_dict.pop(old_prefix + "attention.query_key_value.bias")
|
||||
q, k, v = qkv_weight.chunk(3, dim=0)
|
||||
q_bias, k_bias, v_bias = qkv_bias.chunk(3, dim=0)
|
||||
|
||||
new_state_dict[block_prefix + "attn1.to_q.weight"] = q
|
||||
new_state_dict[block_prefix + "attn1.to_q.bias"] = q_bias
|
||||
new_state_dict[block_prefix + "attn1.to_k.weight"] = k
|
||||
new_state_dict[block_prefix + "attn1.to_k.bias"] = k_bias
|
||||
new_state_dict[block_prefix + "attn1.to_v.weight"] = v
|
||||
new_state_dict[block_prefix + "attn1.to_v.bias"] = v_bias
|
||||
|
||||
new_state_dict[block_prefix + "attn1.to_out.0.weight"] = original_state_dict.pop(
|
||||
old_prefix + "attention.dense.weight"
|
||||
)
|
||||
new_state_dict[block_prefix + "attn1.to_out.0.bias"] = original_state_dict.pop(
|
||||
old_prefix + "attention.dense.bias"
|
||||
)
|
||||
|
||||
new_state_dict[block_prefix + "ff.net.0.proj.weight"] = original_state_dict.pop(
|
||||
old_prefix + "mlp.dense_h_to_4h.weight"
|
||||
)
|
||||
new_state_dict[block_prefix + "ff.net.0.proj.bias"] = original_state_dict.pop(
|
||||
old_prefix + "mlp.dense_h_to_4h.bias"
|
||||
)
|
||||
new_state_dict[block_prefix + "ff.net.2.weight"] = original_state_dict.pop(
|
||||
old_prefix + "mlp.dense_4h_to_h.weight"
|
||||
)
|
||||
new_state_dict[block_prefix + "ff.net.2.bias"] = original_state_dict.pop(old_prefix + "mlp.dense_4h_to_h.bias")
|
||||
|
||||
# Convert final norm and projection
|
||||
new_state_dict["norm_out.linear.weight"] = swap_scale_shift(
|
||||
original_state_dict.pop("mixins.final_layer.adaln.1.weight"), dim=0
|
||||
)
|
||||
new_state_dict["norm_out.linear.bias"] = swap_scale_shift(
|
||||
original_state_dict.pop("mixins.final_layer.adaln.1.bias"), dim=0
|
||||
)
|
||||
new_state_dict["proj_out.weight"] = original_state_dict.pop("mixins.final_layer.linear.weight")
|
||||
new_state_dict["proj_out.bias"] = original_state_dict.pop("mixins.final_layer.linear.bias")
|
||||
|
||||
return new_state_dict
|
||||
|
||||
|
||||
def convert_cogview4_vae_checkpoint_to_diffusers(ckpt_path, vae_config):
|
||||
original_state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
|
||||
return convert_ldm_vae_checkpoint(original_state_dict, vae_config)
|
||||
|
||||
|
||||
def main(args):
|
||||
if args.dtype == "fp16":
|
||||
dtype = torch.float16
|
||||
elif args.dtype == "bf16":
|
||||
dtype = torch.bfloat16
|
||||
elif args.dtype == "fp32":
|
||||
dtype = torch.float32
|
||||
else:
|
||||
raise ValueError(f"Unsupported dtype: {args.dtype}")
|
||||
|
||||
transformer = None
|
||||
vae = None
|
||||
|
||||
if args.transformer_checkpoint_path is not None:
|
||||
converted_transformer_state_dict = convert_cogview4_transformer_checkpoint_to_diffusers(
|
||||
args.transformer_checkpoint_path
|
||||
)
|
||||
transformer = CogView4Transformer2DModel(
|
||||
patch_size=2,
|
||||
in_channels=16,
|
||||
num_layers=28,
|
||||
attention_head_dim=128,
|
||||
num_attention_heads=32,
|
||||
out_channels=16,
|
||||
text_embed_dim=4096,
|
||||
time_embed_dim=512,
|
||||
condition_dim=256,
|
||||
pos_embed_max_size=128,
|
||||
)
|
||||
transformer.load_state_dict(converted_transformer_state_dict, strict=True)
|
||||
if dtype is not None:
|
||||
# Convert to the specified dtype
|
||||
transformer = transformer.to(dtype=dtype)
|
||||
|
||||
if args.vae_checkpoint_path is not None:
|
||||
vae_config = {
|
||||
"in_channels": 3,
|
||||
"out_channels": 3,
|
||||
"down_block_types": ("DownEncoderBlock2D",) * 4,
|
||||
"up_block_types": ("UpDecoderBlock2D",) * 4,
|
||||
"block_out_channels": (128, 512, 1024, 1024),
|
||||
"layers_per_block": 3,
|
||||
"act_fn": "silu",
|
||||
"latent_channels": 16,
|
||||
"norm_num_groups": 32,
|
||||
"sample_size": 1024,
|
||||
"scaling_factor": 1.0,
|
||||
"force_upcast": True,
|
||||
"use_quant_conv": False,
|
||||
"use_post_quant_conv": False,
|
||||
"mid_block_add_attention": False,
|
||||
}
|
||||
converted_vae_state_dict = convert_cogview4_vae_checkpoint_to_diffusers(args.vae_checkpoint_path, vae_config)
|
||||
vae = AutoencoderKL(**vae_config)
|
||||
vae.load_state_dict(converted_vae_state_dict, strict=True)
|
||||
if dtype is not None:
|
||||
vae = vae.to(dtype=dtype)
|
||||
|
||||
text_encoder_id = "THUDM/glm-4-9b-hf"
|
||||
tokenizer = PreTrainedTokenizerFast.from_pretrained(text_encoder_id)
|
||||
text_encoder = GlmForCausalLM.from_pretrained(
|
||||
text_encoder_id,
|
||||
cache_dir=args.text_encoder_cache_dir,
|
||||
torch_dtype=torch.bfloat16 if args.dtype == "bf16" else torch.float32,
|
||||
)
|
||||
|
||||
for param in text_encoder.parameters():
|
||||
param.data = param.data.contiguous()
|
||||
|
||||
scheduler = FlowMatchEulerDiscreteScheduler(
|
||||
base_shift=0.25, max_shift=0.75, base_image_seq_len=256, use_dynamic_shifting=True, time_shift_type="linear"
|
||||
)
|
||||
|
||||
pipe = CogView4Pipeline(
|
||||
tokenizer=tokenizer,
|
||||
text_encoder=text_encoder,
|
||||
vae=vae,
|
||||
transformer=transformer,
|
||||
scheduler=scheduler,
|
||||
)
|
||||
|
||||
# This is necessary for users with insufficient memory, such as those using Colab and notebooks, as it can
|
||||
# save some memory used for model loading.
|
||||
pipe.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB", push_to_hub=args.push_to_hub)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(args)
|
||||
@@ -1,366 +0,0 @@
|
||||
"""
|
||||
Convert a CogView4 checkpoint from Megatron to the Diffusers format.
|
||||
|
||||
Example usage:
|
||||
python scripts/convert_cogview4_to_diffusers.py \
|
||||
--transformer_checkpoint_path 'your path/cogview4_6b/mp_rank_00/model_optim_rng.pt' \
|
||||
--vae_checkpoint_path 'your path/cogview4_6b/imagekl_ch16.pt' \
|
||||
--output_path "THUDM/CogView4-6B" \
|
||||
--dtype "bf16"
|
||||
|
||||
Arguments:
|
||||
--transformer_checkpoint_path: Path to Transformer state dict.
|
||||
--vae_checkpoint_path: Path to VAE state dict.
|
||||
--output_path: The path to save the converted model.
|
||||
--push_to_hub: Whether to push the converted checkpoint to the HF Hub or not. Defaults to `False`.
|
||||
--text_encoder_cache_dir: Cache directory where text encoder is located. Defaults to None, which means HF_HOME will be used.
|
||||
--dtype: The dtype to save the model in (default: "bf16", options: "fp16", "bf16", "fp32"). If None, the dtype of the state dict is considered.
|
||||
|
||||
Default is "bf16" because CogView4 uses bfloat16 for training.
|
||||
|
||||
Note: You must provide either --transformer_checkpoint_path or --vae_checkpoint_path.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
|
||||
import torch
|
||||
from tqdm import tqdm
|
||||
from transformers import GlmForCausalLM, PreTrainedTokenizerFast
|
||||
|
||||
from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler
|
||||
from diffusers.loaders.single_file_utils import convert_ldm_vae_checkpoint
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--transformer_checkpoint_path",
|
||||
default=None,
|
||||
type=str,
|
||||
help="Path to Megatron (not SAT) Transformer checkpoint, e.g., 'model_optim_rng.pt'.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--vae_checkpoint_path",
|
||||
default=None,
|
||||
type=str,
|
||||
help="(Optional) Path to VAE checkpoint, e.g., 'imagekl_ch16.pt'.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--output_path",
|
||||
required=True,
|
||||
type=str,
|
||||
help="Directory to save the final Diffusers format pipeline.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--push_to_hub",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Whether to push the converted model to the HuggingFace Hub.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--text_encoder_cache_dir",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Specify the cache directory for the text encoder.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--dtype",
|
||||
type=str,
|
||||
default="bf16",
|
||||
choices=["fp16", "bf16", "fp32"],
|
||||
help="Data type to save the model in.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--num_layers",
|
||||
type=int,
|
||||
default=28,
|
||||
help="Number of Transformer layers (e.g., 28, 48...).",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--num_heads",
|
||||
type=int,
|
||||
default=32,
|
||||
help="Number of attention heads.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--hidden_size",
|
||||
type=int,
|
||||
default=4096,
|
||||
help="Transformer hidden dimension size.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--attention_head_dim",
|
||||
type=int,
|
||||
default=128,
|
||||
help="Dimension of each attention head.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--time_embed_dim",
|
||||
type=int,
|
||||
default=512,
|
||||
help="Dimension of time embeddings.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--condition_dim",
|
||||
type=int,
|
||||
default=256,
|
||||
help="Dimension of condition embeddings.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--pos_embed_max_size",
|
||||
type=int,
|
||||
default=128,
|
||||
help="Maximum size for positional embeddings.",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
def swap_scale_shift(weight, dim):
|
||||
"""
|
||||
Swap the scale and shift components in the weight tensor.
|
||||
|
||||
Args:
|
||||
weight (torch.Tensor): The original weight tensor.
|
||||
dim (int): The dimension along which to split.
|
||||
|
||||
Returns:
|
||||
torch.Tensor: The modified weight tensor with scale and shift swapped.
|
||||
"""
|
||||
shift, scale = weight.chunk(2, dim=dim)
|
||||
new_weight = torch.cat([scale, shift], dim=dim)
|
||||
return new_weight
|
||||
|
||||
|
||||
def convert_megatron_transformer_checkpoint_to_diffusers(
|
||||
ckpt_path: str,
|
||||
num_layers: int,
|
||||
num_heads: int,
|
||||
hidden_size: int,
|
||||
):
|
||||
"""
|
||||
Convert a Megatron Transformer checkpoint to Diffusers format.
|
||||
|
||||
Args:
|
||||
ckpt_path (str): Path to the Megatron Transformer checkpoint.
|
||||
num_layers (int): Number of Transformer layers.
|
||||
num_heads (int): Number of attention heads.
|
||||
hidden_size (int): Hidden size of the Transformer.
|
||||
|
||||
Returns:
|
||||
dict: The converted state dictionary compatible with Diffusers.
|
||||
"""
|
||||
ckpt = torch.load(ckpt_path, map_location="cpu")
|
||||
mega = ckpt["model"]
|
||||
|
||||
new_state_dict = {}
|
||||
|
||||
# Patch Embedding
|
||||
new_state_dict["patch_embed.proj.weight"] = mega["encoder_expand_linear.weight"].reshape(hidden_size, 64)
|
||||
new_state_dict["patch_embed.proj.bias"] = mega["encoder_expand_linear.bias"]
|
||||
new_state_dict["patch_embed.text_proj.weight"] = mega["text_projector.weight"]
|
||||
new_state_dict["patch_embed.text_proj.bias"] = mega["text_projector.bias"]
|
||||
|
||||
# Time Condition Embedding
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_1.weight"] = mega[
|
||||
"time_embedding.time_embed.0.weight"
|
||||
]
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_1.bias"] = mega["time_embedding.time_embed.0.bias"]
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_2.weight"] = mega[
|
||||
"time_embedding.time_embed.2.weight"
|
||||
]
|
||||
new_state_dict["time_condition_embed.timestep_embedder.linear_2.bias"] = mega["time_embedding.time_embed.2.bias"]
|
||||
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_1.weight"] = mega[
|
||||
"label_embedding.label_embed.0.weight"
|
||||
]
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_1.bias"] = mega[
|
||||
"label_embedding.label_embed.0.bias"
|
||||
]
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_2.weight"] = mega[
|
||||
"label_embedding.label_embed.2.weight"
|
||||
]
|
||||
new_state_dict["time_condition_embed.condition_embedder.linear_2.bias"] = mega[
|
||||
"label_embedding.label_embed.2.bias"
|
||||
]
|
||||
|
||||
# Convert each Transformer layer
|
||||
for i in tqdm(range(num_layers), desc="Converting layers (Megatron->Diffusers)"):
|
||||
block_prefix = f"transformer_blocks.{i}."
|
||||
|
||||
# AdaLayerNorm
|
||||
new_state_dict[block_prefix + "norm1.linear.weight"] = swap_scale_shift(
|
||||
mega[f"decoder.layers.{i}.adaln.weight"], dim=0
|
||||
)
|
||||
new_state_dict[block_prefix + "norm1.linear.bias"] = swap_scale_shift(
|
||||
mega[f"decoder.layers.{i}.adaln.bias"], dim=0
|
||||
)
|
||||
|
||||
# QKV
|
||||
qkv_weight = mega[f"decoder.layers.{i}.self_attention.linear_qkv.weight"]
|
||||
qkv_bias = mega[f"decoder.layers.{i}.self_attention.linear_qkv.bias"]
|
||||
|
||||
# Reshape to match SAT logic
|
||||
qkv_weight = qkv_weight.view(num_heads, 3, hidden_size // num_heads, hidden_size)
|
||||
qkv_weight = qkv_weight.permute(1, 0, 2, 3).reshape(3 * hidden_size, hidden_size)
|
||||
|
||||
qkv_bias = qkv_bias.view(num_heads, 3, hidden_size // num_heads)
|
||||
qkv_bias = qkv_bias.permute(1, 0, 2).reshape(3 * hidden_size)
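# (Comment added for clarity; interpretation of the reshapes above.) Megatron stores the
# fused QKV projection grouped per attention head, i.e. [head_0: q, k, v | head_1: q, k, v | ...];
# the view/permute/reshape regroups the rows into [all q | all k | all v] so that the
# chunk(3, dim=0) below yields full Q, K and V projection matrices.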
|
||||
|
||||
# Assign to Diffusers keys
|
||||
q, k, v = torch.chunk(qkv_weight, 3, dim=0)
|
||||
qb, kb, vb = torch.chunk(qkv_bias, 3, dim=0)
|
||||
|
||||
new_state_dict[block_prefix + "attn1.to_q.weight"] = q
|
||||
new_state_dict[block_prefix + "attn1.to_q.bias"] = qb
|
||||
new_state_dict[block_prefix + "attn1.to_k.weight"] = k
|
||||
new_state_dict[block_prefix + "attn1.to_k.bias"] = kb
|
||||
new_state_dict[block_prefix + "attn1.to_v.weight"] = v
|
||||
new_state_dict[block_prefix + "attn1.to_v.bias"] = vb
|
||||
|
||||
# Attention Output
|
||||
new_state_dict[block_prefix + "attn1.to_out.0.weight"] = mega[
|
||||
f"decoder.layers.{i}.self_attention.linear_proj.weight"
|
||||
].T
|
||||
new_state_dict[block_prefix + "attn1.to_out.0.bias"] = mega[
|
||||
f"decoder.layers.{i}.self_attention.linear_proj.bias"
|
||||
]
|
||||
|
||||
# MLP
|
||||
new_state_dict[block_prefix + "ff.net.0.proj.weight"] = mega[f"decoder.layers.{i}.mlp.linear_fc1.weight"]
|
||||
new_state_dict[block_prefix + "ff.net.0.proj.bias"] = mega[f"decoder.layers.{i}.mlp.linear_fc1.bias"]
|
||||
new_state_dict[block_prefix + "ff.net.2.weight"] = mega[f"decoder.layers.{i}.mlp.linear_fc2.weight"]
|
||||
new_state_dict[block_prefix + "ff.net.2.bias"] = mega[f"decoder.layers.{i}.mlp.linear_fc2.bias"]
|
||||
|
||||
# Final Layers
|
||||
new_state_dict["norm_out.linear.weight"] = swap_scale_shift(mega["adaln_final.weight"], dim=0)
|
||||
new_state_dict["norm_out.linear.bias"] = swap_scale_shift(mega["adaln_final.bias"], dim=0)
|
||||
new_state_dict["proj_out.weight"] = mega["output_projector.weight"]
|
||||
new_state_dict["proj_out.bias"] = mega["output_projector.bias"]
|
||||
|
||||
return new_state_dict
|
||||
|
||||
|
||||
def convert_cogview4_vae_checkpoint_to_diffusers(ckpt_path, vae_config):
|
||||
"""
|
||||
Convert a CogView4 VAE checkpoint to Diffusers format.
|
||||
|
||||
Args:
|
||||
ckpt_path (str): Path to the VAE checkpoint.
|
||||
vae_config (dict): Configuration dictionary for the VAE.
|
||||
|
||||
Returns:
|
||||
dict: The converted VAE state dictionary compatible with Diffusers.
|
||||
"""
|
||||
original_state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
|
||||
return convert_ldm_vae_checkpoint(original_state_dict, vae_config)
|
||||
|
||||
|
||||
def main(args):
|
||||
"""
|
||||
Main function to convert CogView4 checkpoints to Diffusers format.
|
||||
|
||||
Args:
|
||||
args (argparse.Namespace): Parsed command-line arguments.
|
||||
"""
|
||||
# Determine the desired data type
|
||||
if args.dtype == "fp16":
|
||||
dtype = torch.float16
|
||||
elif args.dtype == "bf16":
|
||||
dtype = torch.bfloat16
|
||||
elif args.dtype == "fp32":
|
||||
dtype = torch.float32
|
||||
else:
|
||||
raise ValueError(f"Unsupported dtype: {args.dtype}")
|
||||
|
||||
transformer = None
|
||||
vae = None
|
||||
|
||||
# Convert Transformer checkpoint if provided
|
||||
if args.transformer_checkpoint_path is not None:
|
||||
converted_transformer_state_dict = convert_megatron_transformer_checkpoint_to_diffusers(
|
||||
ckpt_path=args.transformer_checkpoint_path,
|
||||
num_layers=args.num_layers,
|
||||
num_heads=args.num_heads,
|
||||
hidden_size=args.hidden_size,
|
||||
)
|
||||
transformer = CogView4Transformer2DModel(
|
||||
patch_size=2,
|
||||
in_channels=16,
|
||||
num_layers=args.num_layers,
|
||||
attention_head_dim=args.attention_head_dim,
|
||||
num_attention_heads=args.num_heads,
|
||||
out_channels=16,
|
||||
text_embed_dim=args.hidden_size,
|
||||
time_embed_dim=args.time_embed_dim,
|
||||
condition_dim=args.condition_dim,
|
||||
pos_embed_max_size=args.pos_embed_max_size,
|
||||
)
|
||||
|
||||
transformer.load_state_dict(converted_transformer_state_dict, strict=True)
|
||||
|
||||
# Convert to the specified dtype
|
||||
if dtype is not None:
|
||||
transformer = transformer.to(dtype=dtype)
|
||||
|
||||
# Convert VAE checkpoint if provided
|
||||
if args.vae_checkpoint_path is not None:
|
||||
vae_config = {
|
||||
"in_channels": 3,
|
||||
"out_channels": 3,
|
||||
"down_block_types": ("DownEncoderBlock2D",) * 4,
|
||||
"up_block_types": ("UpDecoderBlock2D",) * 4,
|
||||
"block_out_channels": (128, 512, 1024, 1024),
|
||||
"layers_per_block": 3,
|
||||
"act_fn": "silu",
|
||||
"latent_channels": 16,
|
||||
"norm_num_groups": 32,
|
||||
"sample_size": 1024,
|
||||
"scaling_factor": 1.0,
|
||||
"force_upcast": True,
|
||||
"use_quant_conv": False,
|
||||
"use_post_quant_conv": False,
|
||||
"mid_block_add_attention": False,
|
||||
}
|
||||
converted_vae_state_dict = convert_cogview4_vae_checkpoint_to_diffusers(args.vae_checkpoint_path, vae_config)
|
||||
vae = AutoencoderKL(**vae_config)
|
||||
vae.load_state_dict(converted_vae_state_dict, strict=True)
|
||||
if dtype is not None:
|
||||
vae = vae.to(dtype=dtype)
|
||||
|
||||
# Load the text encoder and tokenizer
|
||||
text_encoder_id = "THUDM/glm-4-9b-hf"
|
||||
tokenizer = PreTrainedTokenizerFast.from_pretrained(text_encoder_id)
|
||||
text_encoder = GlmForCausalLM.from_pretrained(
|
||||
text_encoder_id,
|
||||
cache_dir=args.text_encoder_cache_dir,
|
||||
torch_dtype=torch.bfloat16 if args.dtype == "bf16" else torch.float32,
|
||||
)
|
||||
for param in text_encoder.parameters():
|
||||
param.data = param.data.contiguous()
|
||||
|
||||
# Initialize the scheduler
|
||||
scheduler = FlowMatchEulerDiscreteScheduler(
|
||||
base_shift=0.25, max_shift=0.75, base_image_seq_len=256, use_dynamic_shifting=True, time_shift_type="linear"
|
||||
)
|
||||
|
||||
# Create the pipeline
|
||||
pipe = CogView4Pipeline(
|
||||
tokenizer=tokenizer,
|
||||
text_encoder=text_encoder,
|
||||
vae=vae,
|
||||
transformer=transformer,
|
||||
scheduler=scheduler,
|
||||
)
|
||||
|
||||
# Save the converted pipeline
|
||||
pipe.save_pretrained(
|
||||
args.output_path,
|
||||
safe_serialization=True,
|
||||
max_shard_size="5GB",
|
||||
push_to_hub=args.push_to_hub,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(args)
|
||||
@@ -1,203 +0,0 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import torch
|
||||
from huggingface_hub import snapshot_download
|
||||
from safetensors.torch import load_file
|
||||
from transformers import AutoTokenizer
|
||||
|
||||
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, OmniGenPipeline, OmniGenTransformer2DModel
|
||||
|
||||
|
||||
def main(args):
|
||||
# checkpoint from https://huggingface.co/Shitao/OmniGen-v1
|
||||
|
||||
if not os.path.exists(args.origin_ckpt_path):
|
||||
print("Model not found, downloading...")
|
||||
cache_folder = os.getenv("HF_HUB_CACHE")
|
||||
args.origin_ckpt_path = snapshot_download(
|
||||
repo_id=args.origin_ckpt_path,
|
||||
cache_dir=cache_folder,
|
||||
ignore_patterns=["flax_model.msgpack", "rust_model.ot", "tf_model.h5", "model.pt"],
|
||||
)
|
||||
print(f"Downloaded model to {args.origin_ckpt_path}")
|
||||
|
||||
ckpt = os.path.join(args.origin_ckpt_path, "model.safetensors")
|
||||
ckpt = load_file(ckpt, device="cpu")
|
||||
|
||||
mapping_dict = {
|
||||
"pos_embed": "patch_embedding.pos_embed",
|
||||
"x_embedder.proj.weight": "patch_embedding.output_image_proj.weight",
|
||||
"x_embedder.proj.bias": "patch_embedding.output_image_proj.bias",
|
||||
"input_x_embedder.proj.weight": "patch_embedding.input_image_proj.weight",
|
||||
"input_x_embedder.proj.bias": "patch_embedding.input_image_proj.bias",
|
||||
"final_layer.adaLN_modulation.1.weight": "norm_out.linear.weight",
|
||||
"final_layer.adaLN_modulation.1.bias": "norm_out.linear.bias",
|
||||
"final_layer.linear.weight": "proj_out.weight",
|
||||
"final_layer.linear.bias": "proj_out.bias",
|
||||
"time_token.mlp.0.weight": "time_token.linear_1.weight",
|
||||
"time_token.mlp.0.bias": "time_token.linear_1.bias",
|
||||
"time_token.mlp.2.weight": "time_token.linear_2.weight",
|
||||
"time_token.mlp.2.bias": "time_token.linear_2.bias",
|
||||
"t_embedder.mlp.0.weight": "t_embedder.linear_1.weight",
|
||||
"t_embedder.mlp.0.bias": "t_embedder.linear_1.bias",
|
||||
"t_embedder.mlp.2.weight": "t_embedder.linear_2.weight",
|
||||
"t_embedder.mlp.2.bias": "t_embedder.linear_2.bias",
|
||||
"llm.embed_tokens.weight": "embed_tokens.weight",
|
||||
}
|
||||
|
||||
converted_state_dict = {}
|
||||
for k, v in ckpt.items():
|
||||
if k in mapping_dict:
|
||||
converted_state_dict[mapping_dict[k]] = v
|
||||
elif "qkv" in k:
|
||||
to_q, to_k, to_v = v.chunk(3)
|
||||
converted_state_dict[f"layers.{k.split('.')[2]}.self_attn.to_q.weight"] = to_q
|
||||
converted_state_dict[f"layers.{k.split('.')[2]}.self_attn.to_k.weight"] = to_k
|
||||
converted_state_dict[f"layers.{k.split('.')[2]}.self_attn.to_v.weight"] = to_v
|
||||
elif "o_proj" in k:
|
||||
converted_state_dict[f"layers.{k.split('.')[2]}.self_attn.to_out.0.weight"] = v
|
||||
else:
|
||||
converted_state_dict[k[4:]] = v
|
||||
|
||||
transformer = OmniGenTransformer2DModel(
|
||||
rope_scaling={
|
||||
"long_factor": [
|
||||
1.0299999713897705,
|
||||
1.0499999523162842,
|
||||
1.0499999523162842,
|
||||
1.0799999237060547,
|
||||
1.2299998998641968,
|
||||
1.2299998998641968,
|
||||
1.2999999523162842,
|
||||
1.4499999284744263,
|
||||
1.5999999046325684,
|
||||
1.6499998569488525,
|
||||
1.8999998569488525,
|
||||
2.859999895095825,
|
||||
3.68999981880188,
|
||||
5.419999599456787,
|
||||
5.489999771118164,
|
||||
5.489999771118164,
|
||||
9.09000015258789,
|
||||
11.579999923706055,
|
||||
15.65999984741211,
|
||||
15.769999504089355,
|
||||
15.789999961853027,
|
||||
18.360000610351562,
|
||||
21.989999771118164,
|
||||
23.079999923706055,
|
||||
30.009998321533203,
|
||||
32.35000228881836,
|
||||
32.590003967285156,
|
||||
35.56000518798828,
|
||||
39.95000457763672,
|
||||
53.840003967285156,
|
||||
56.20000457763672,
|
||||
57.95000457763672,
|
||||
59.29000473022461,
|
||||
59.77000427246094,
|
||||
59.920005798339844,
|
||||
61.190006256103516,
|
||||
61.96000671386719,
|
||||
62.50000762939453,
|
||||
63.3700065612793,
|
||||
63.48000717163086,
|
||||
63.48000717163086,
|
||||
63.66000747680664,
|
||||
63.850006103515625,
|
||||
64.08000946044922,
|
||||
64.760009765625,
|
||||
64.80001068115234,
|
||||
64.81001281738281,
|
||||
64.81001281738281,
|
||||
],
|
||||
"short_factor": [
|
||||
1.05,
|
||||
1.05,
|
||||
1.05,
|
||||
1.1,
|
||||
1.1,
|
||||
1.1,
|
||||
1.2500000000000002,
|
||||
1.2500000000000002,
|
||||
1.4000000000000004,
|
||||
1.4500000000000004,
|
||||
1.5500000000000005,
|
||||
1.8500000000000008,
|
||||
1.9000000000000008,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.000000000000001,
|
||||
2.1000000000000005,
|
||||
2.1000000000000005,
|
||||
2.2,
|
||||
2.3499999999999996,
|
||||
2.3499999999999996,
|
||||
2.3499999999999996,
|
||||
2.3499999999999996,
|
||||
2.3999999999999995,
|
||||
2.3999999999999995,
|
||||
2.6499999999999986,
|
||||
2.6999999999999984,
|
||||
2.8999999999999977,
|
||||
2.9499999999999975,
|
||||
3.049999999999997,
|
||||
3.049999999999997,
|
||||
3.049999999999997,
|
||||
],
|
||||
"type": "su",
|
||||
},
|
||||
patch_size=2,
|
||||
in_channels=4,
|
||||
pos_embed_max_size=192,
|
||||
)
|
||||
transformer.load_state_dict(converted_state_dict, strict=True)
|
||||
transformer.to(torch.bfloat16)
|
||||
|
||||
num_model_params = sum(p.numel() for p in transformer.parameters())
|
||||
print(f"Total number of transformer parameters: {num_model_params}")
|
||||
|
||||
scheduler = FlowMatchEulerDiscreteScheduler(invert_sigmas=True, num_train_timesteps=1)
|
||||
|
||||
vae = AutoencoderKL.from_pretrained(os.path.join(args.origin_ckpt_path, "vae"), torch_dtype=torch.float32)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(args.origin_ckpt_path)
|
||||
|
||||
pipeline = OmniGenPipeline(tokenizer=tokenizer, transformer=transformer, vae=vae, scheduler=scheduler)
|
||||
pipeline.save_pretrained(args.dump_path)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
parser.add_argument(
|
||||
"--origin_ckpt_path",
|
||||
default="Shitao/OmniGen-v1",
|
||||
type=str,
|
||||
required=False,
|
||||
help="Path to the checkpoint to convert.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--dump_path", default="OmniGen-v1-diffusers", type=str, required=False, help="Path to the output pipeline."
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
main(args)
|
||||
@@ -1,151 +0,0 @@
|
||||
"""
|
||||
This script demonstrates how to extract a LoRA checkpoint from a fully finetuned model with the CogVideoX model.
|
||||
|
||||
To make it work for other models:
|
||||
|
||||
* Change the model class. Here we use `CogVideoXTransformer3DModel`. For Flux, it would be `FluxTransformer2DModel`,
|
||||
for example. (TODO: more reason to add `AutoModel`).
|
||||
* Supply path to the base checkpoint via `--base_ckpt_path`.
|
||||
* Supply path to the fully fine-tuned checkpoint via `--finetune_ckpt_path`.
|
||||
* Change the `--rank` as needed.
|
||||
|
||||
Example usage:
|
||||
|
||||
```bash
|
||||
python extract_lora_from_model.py \
|
||||
--base_ckpt_path=THUDM/CogVideoX-5b \
|
||||
--finetune_ckpt_path=finetrainers/cakeify-v0 \
|
||||
--lora_out_path=cakeify_lora.safetensors
|
||||
```
|
||||
|
||||
Script is adapted from
|
||||
https://github.com/Stability-AI/stability-ComfyUI-nodes/blob/001154622564b17223ce0191803c5fff7b87146c/control_lora_create.py
|
||||
"""
|
||||
|
||||
import argparse
|
||||
|
||||
import torch
|
||||
from safetensors.torch import save_file
|
||||
from tqdm.auto import tqdm
|
||||
|
||||
from diffusers import CogVideoXTransformer3DModel
|
||||
|
||||
|
||||
RANK = 64
|
||||
CLAMP_QUANTILE = 0.99
|
||||
|
||||
|
||||
# Comes from
|
||||
# https://github.com/Stability-AI/stability-ComfyUI-nodes/blob/001154622564b17223ce0191803c5fff7b87146c/control_lora_create.py#L9
|
||||
def extract_lora(diff, rank):
|
||||
# Important to use CUDA otherwise, very slow!
|
||||
if torch.cuda.is_available():
|
||||
diff = diff.to("cuda")
|
||||
|
||||
is_conv2d = len(diff.shape) == 4
|
||||
kernel_size = None if not is_conv2d else diff.size()[2:4]
|
||||
is_conv2d_3x3 = is_conv2d and kernel_size != (1, 1)
|
||||
out_dim, in_dim = diff.size()[0:2]
|
||||
rank = min(rank, in_dim, out_dim)
|
||||
|
||||
if is_conv2d:
|
||||
if is_conv2d_3x3:
|
||||
diff = diff.flatten(start_dim=1)
|
||||
else:
|
||||
diff = diff.squeeze()
|
||||
|
||||
U, S, Vh = torch.linalg.svd(diff.float())
|
||||
U = U[:, :rank]
|
||||
S = S[:rank]
|
||||
U = U @ torch.diag(S)
|
||||
Vh = Vh[:rank, :]
|
||||
|
||||
dist = torch.cat([U.flatten(), Vh.flatten()])
|
||||
hi_val = torch.quantile(dist, CLAMP_QUANTILE)
|
||||
low_val = -hi_val
|
||||
|
||||
U = U.clamp(low_val, hi_val)
|
||||
Vh = Vh.clamp(low_val, hi_val)
|
||||
if is_conv2d:
|
||||
U = U.reshape(out_dim, rank, 1, 1)
|
||||
Vh = Vh.reshape(rank, in_dim, kernel_size[0], kernel_size[1])
|
||||
return (U.cpu(), Vh.cpu())
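# Note (added for clarity): U has absorbed the singular values, so `U @ Vh` is a clamped
# rank-`rank` approximation of `diff`; in `main` below, U is saved as the LoRA "up"
# (`lora_B`) matrix and Vh as the LoRA "down" (`lora_A`) matrix.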
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--base_ckpt_path",
|
||||
default=None,
|
||||
type=str,
|
||||
required=True,
|
||||
help="Base checkpoint path from which the model was finetuned. Can be a model ID on the Hub.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--base_subfolder",
|
||||
default="transformer",
|
||||
type=str,
|
||||
help="subfolder to load the base checkpoint from if any.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--finetune_ckpt_path",
|
||||
default=None,
|
||||
type=str,
|
||||
required=True,
|
||||
help="Fully fine-tuned checkpoint path. Can be a model ID on the Hub.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--finetune_subfolder",
|
||||
default=None,
|
||||
type=str,
|
||||
help="subfolder to load the fulle finetuned checkpoint from if any.",
|
||||
)
|
||||
parser.add_argument("--rank", default=64, type=int)
|
||||
parser.add_argument("--lora_out_path", default=None, type=str, required=True)
|
||||
args = parser.parse_args()
|
||||
|
||||
if not args.lora_out_path.endswith(".safetensors"):
|
||||
raise ValueError("`lora_out_path` must end with `.safetensors`.")
|
||||
|
||||
return args
|
||||
|
||||
|
||||
@torch.no_grad()
|
||||
def main(args):
|
||||
model_finetuned = CogVideoXTransformer3DModel.from_pretrained(
|
||||
args.finetune_ckpt_path, subfolder=args.finetune_subfolder, torch_dtype=torch.bfloat16
|
||||
)
|
||||
state_dict_ft = model_finetuned.state_dict()
|
||||
|
||||
# Change the `subfolder` as needed.
|
||||
base_model = CogVideoXTransformer3DModel.from_pretrained(
|
||||
args.base_ckpt_path, subfolder=args.base_subfolder, torch_dtype=torch.bfloat16
|
||||
)
|
||||
state_dict = base_model.state_dict()
|
||||
output_dict = {}
|
||||
|
||||
for k in tqdm(state_dict, desc="Extracting LoRA..."):
|
||||
original_param = state_dict[k]
|
||||
finetuned_param = state_dict_ft[k]
|
||||
if len(original_param.shape) >= 2:
|
||||
diff = finetuned_param.float() - original_param.float()
|
||||
out = extract_lora(diff, RANK)
|
||||
name = k
|
||||
|
||||
if name.endswith(".weight"):
|
||||
name = name[: -len(".weight")]
|
||||
down_key = "{}.lora_A.weight".format(name)
|
||||
up_key = "{}.lora_B.weight".format(name)
|
||||
|
||||
output_dict[up_key] = out[0].contiguous().to(finetuned_param.dtype)
|
||||
output_dict[down_key] = out[1].contiguous().to(finetuned_param.dtype)
|
||||
|
||||
prefix = "transformer" if "transformer" in base_model.__class__.__name__.lower() else "unet"
|
||||
output_dict = {f"{prefix}.{k}": v for k, v in output_dict.items()}
|
||||
save_file(output_dict, args.lora_out_path)
|
||||
print(f"LoRA saved and it contains {len(output_dict)} keys.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = parse_args()
|
||||
main(args)
|
||||