Mirror of https://github.com/huggingface/diffusers.git (synced 2025-12-13 07:54:45 +08:00)
Compare commits: 1 commit (chroma-doc...release-to)
| Author | SHA1 | Date |
|---|---|---|
| | ba2ef8bc40 | |
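The edit is the same across all three workflows: each step-level `env` entry that previously exposed the broad `secrets.HF_TOKEN` now exposes the read-scoped `secrets.DIFFUSERS_HF_HUB_READ_TOKEN`. Below is a minimal sketch of the resulting pattern; the workflow, job, and step names are hypothetical placeholders, not taken from the actual files (those appear in the hunks that follow).

```yaml
# Minimal sketch of the pattern applied in this commit; names are illustrative only.
name: example-tests
on: workflow_dispatch

jobs:
  example_tests:
    runs-on: ubuntu-latest
    steps:
      - name: Run tests
        env:
          # The read-only Hub token is injected per step via the step's env block.
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
        run: |
          python -m pytest -n 1 -s -v tests/
```

Note that the secret must be defined under the repository or organization Actions secrets; an undefined secret resolves to an empty string, so `HF_TOKEN` would silently be empty at runtime.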
.github/workflows/nightly_tests.yml (vendored): 6 changed lines
@@ -265,7 +265,7 @@ jobs:
 
       - name: Run PyTorch CUDA tests
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
           CUBLAS_WORKSPACE_CONFIG: :16:8
         run: |
@@ -505,7 +505,7 @@ jobs:
       #     shell: arch -arch arm64 bash {0}
       #     env:
       #       HF_HOME: /System/Volumes/Data/mnt/cache
-      #       HF_TOKEN: ${{ secrets.HF_TOKEN }}
+      #       HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
       #     run: |
       #       ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps \
       #         --report-log=tests_torch_mps.log \
@@ -561,7 +561,7 @@ jobs:
       #     shell: arch -arch arm64 bash {0}
       #     env:
       #       HF_HOME: /System/Volumes/Data/mnt/cache
-      #       HF_TOKEN: ${{ secrets.HF_TOKEN }}
+      #       HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
       #     run: |
      #       ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps \
       #         --report-log=tests_torch_mps.log \
.github/workflows/push_tests.yml (vendored): 10 changed lines
@@ -187,7 +187,7 @@ jobs:
 
       - name: Run Flax TPU tests
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
         run: |
           python -m pytest -n 0 \
             -s -v -k "Flax" \
@@ -235,7 +235,7 @@ jobs:
 
       - name: Run ONNXRuntime CUDA tests
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
             -s -v -k "Onnx" \
@@ -283,7 +283,7 @@ jobs:
           python utils/print_env.py
       - name: Run example tests on GPU
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
           RUN_COMPILE: yes
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
@@ -326,7 +326,7 @@ jobs:
           python utils/print_env.py
       - name: Run example tests on GPU
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
       - name: Failure short reports
@@ -372,7 +372,7 @@ jobs:
 
       - name: Run example tests on GPU
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
         run: |
           python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python -m uv pip install timm
.github/workflows/release_tests_fast.yml (vendored): 16 changed lines
@@ -81,7 +81,7 @@ jobs:
           python utils/print_env.py
       - name: Slow PyTorch CUDA checkpoint tests on Ubuntu
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
           CUBLAS_WORKSPACE_CONFIG: :16:8
         run: |
@@ -135,7 +135,7 @@ jobs:
 
       - name: Run PyTorch CUDA tests
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
           CUBLAS_WORKSPACE_CONFIG: :16:8
         run: |
@@ -186,7 +186,7 @@ jobs:
 
       - name: Run PyTorch CUDA tests
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
           CUBLAS_WORKSPACE_CONFIG: :16:8
         run: |
@@ -241,7 +241,7 @@ jobs:
 
       - name: Run slow Flax TPU tests
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
         run: |
           python -m pytest -n 0 \
             -s -v -k "Flax" \
@@ -289,7 +289,7 @@ jobs:
 
       - name: Run slow ONNXRuntime CUDA tests
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
             -s -v -k "Onnx" \
@@ -337,7 +337,7 @@ jobs:
           python utils/print_env.py
       - name: Run example tests on GPU
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
           RUN_COMPILE: yes
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
@@ -380,7 +380,7 @@ jobs:
           python utils/print_env.py
       - name: Run example tests on GPU
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
       - name: Failure short reports
@@ -426,7 +426,7 @@ jobs:
 
       - name: Run example tests on GPU
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
         run: |
           python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python -m uv pip install timm