Mirror of https://github.com/huggingface/diffusers.git
Synced 2025-12-10 14:34:55 +08:00

Compare commits: v0.18.0 ... v0.10.2-pa (4 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 0ca172407d |  |
|  | 315f37674b |  |
|  | ea96fa686e |  |
|  | b9b344e58a |  |
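The four commits below appear to be a backport patch release (hence the `v0.10.2-pa` tag): they bump the package version from 0.10.0 to 0.10.2, turn xformers memory-efficient attention from an implicit default into an explicit opt-in API on `ModelMixin`, wire that opt-in into the example training scripts, and relax a hard import-time `transformers >= 4.25.1` requirement into per-pipeline version gates.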
```diff
@@ -17,6 +17,7 @@ from accelerate.utils import set_seed
 from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
 from diffusers.optimization import get_scheduler
 from diffusers.utils import check_min_version
+from diffusers.utils.import_utils import is_xformers_available
 from huggingface_hub import HfFolder, Repository, whoami
 from PIL import Image
 from torchvision import transforms
@@ -488,6 +489,15 @@ def main(args):
         revision=args.revision,
     )
 
+    if is_xformers_available():
+        try:
+            unet.enable_xformers_memory_efficient_attention(True)
+        except Exception as e:
+            logger.warning(
+                "Could not enable memory efficient attention. Make sure xformers is installed"
+                f" correctly and a GPU is available: {e}"
+            )
+
     vae.requires_grad_(False)
     if not args.train_text_encoder:
         text_encoder.requires_grad_(False)
```
```diff
@@ -18,6 +18,7 @@ from datasets import load_dataset
 from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
 from diffusers.optimization import get_scheduler
 from diffusers.utils import check_min_version
+from diffusers.utils.import_utils import is_xformers_available
 from huggingface_hub import HfFolder, Repository, whoami
 from torchvision import transforms
 from tqdm.auto import tqdm
@@ -364,6 +365,15 @@ def main():
         revision=args.revision,
     )
 
+    if is_xformers_available():
+        try:
+            unet.enable_xformers_memory_efficient_attention(True)
+        except Exception as e:
+            logger.warning(
+                "Could not enable memory efficient attention. Make sure xformers is installed"
+                f" correctly and a GPU is available: {e}"
+            )
+
     # Freeze vae and text_encoder
     vae.requires_grad_(False)
     text_encoder.requires_grad_(False)
```
```diff
@@ -20,6 +20,7 @@ from diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, StableDiffusi
 from diffusers.optimization import get_scheduler
 from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
 from diffusers.utils import check_min_version
+from diffusers.utils.import_utils import is_xformers_available
 from huggingface_hub import HfFolder, Repository, whoami
 
 # TODO: remove and import from diffusers.utils when the new version of diffusers is released
@@ -439,6 +440,15 @@ def main():
         revision=args.revision,
     )
 
+    if is_xformers_available():
+        try:
+            unet.enable_xformers_memory_efficient_attention(True)
+        except Exception as e:
+            logger.warning(
+                "Could not enable memory efficient attention. Make sure xformers is installed"
+                f" correctly and a GPU is available: {e}"
+            )
+
     # Resize the token embeddings as we are adding new special tokens to the tokenizer
     text_encoder.resize_token_embeddings(len(tokenizer))
 
```
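The three hunks above make the same change in three training scripts (by their imports, apparently the DreamBooth, text-to-image, and textual-inversion examples): import `is_xformers_available` and, during model setup in `main`, opt the UNet into xformers attention inside a `try`/`except`. Note that the scripts pass `True` to `enable_xformers_memory_efficient_attention`, while the method added to `ModelMixin` later in this compare takes no argument; because the call sits in a broad `except Exception`, a signature mismatch would surface only as the warning. Below is a minimal standalone sketch of the same opt-in pattern; the checkpoint name is a hypothetical choice, not part of the diff, and the call is made without the extra argument so it matches the `ModelMixin` signature.

```python
# Minimal sketch of the opt-in pattern the three training scripts share:
# probe for xformers, enable it on the UNet, and fall back gracefully when
# it is missing or no GPU is available.
import logging

from diffusers import UNet2DConditionModel
from diffusers.utils.import_utils import is_xformers_available

logger = logging.getLogger(__name__)

# Hypothetical checkpoint choice, not part of the diff.
unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet"
)

if is_xformers_available():
    try:
        unet.enable_xformers_memory_efficient_attention()
    except Exception as e:
        logger.warning(
            "Could not enable memory efficient attention. Make sure xformers is installed"
            f" correctly and a GPU is available: {e}"
        )
```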
setup.py (2 changed lines)

```diff
@@ -218,7 +218,7 @@ install_requires = [
 
 setup(
     name="diffusers",
-    version="0.10.0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
+    version="0.10.2", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
     description="Diffusers",
     long_description=open("README.md", "r", encoding="utf-8").read(),
     long_description_content_type="text/markdown",
```
```diff
@@ -1,4 +1,4 @@
-__version__ = "0.10.0"
+__version__ = "0.10.2"
 
 from .configuration_utils import ConfigMixin
 from .onnx_utils import OnnxRuntimeModel
@@ -18,18 +18,6 @@ from .utils import (
 )
 
 
-# Make sure `transformers` is up to date
-if is_transformers_available():
-    import transformers
-
-    if is_transformers_version("<", "4.25.1"):
-        raise ImportError(
-            f"`diffusers` requires transformers >= 4.25.1 to function correctly, but {transformers.__version__} was"
-            " found in your environment. You can upgrade it with pip: `pip install transformers --upgrade`"
-        )
-else:
-    pass
-
 try:
     if not is_torch_available():
         raise OptionalDependencyNotAvailable()
```
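Two things change at the package root: the version string moves to 0.10.2, matching `setup.py`, and the blanket import-time check that refused to import `diffusers` at all on `transformers < 4.25.1` is removed. The stricter requirement survives only as the narrower per-pipeline gates in the `requires_backends` and pipeline `__init__` hunks below, so an older `transformers` now blocks only the specific pipelines that need the newer release.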
```diff
@@ -188,6 +188,39 @@ class ModelMixin(torch.nn.Module):
         if self._supports_gradient_checkpointing:
             self.apply(partial(self._set_gradient_checkpointing, value=False))
 
+    def set_use_memory_efficient_attention_xformers(self, valid: bool) -> None:
+        # Recursively walk through all the children.
+        # Any children which exposes the set_use_memory_efficient_attention_xformers method
+        # gets the message
+        def fn_recursive_set_mem_eff(module: torch.nn.Module):
+            if hasattr(module, "set_use_memory_efficient_attention_xformers"):
+                module.set_use_memory_efficient_attention_xformers(valid)
+
+            for child in module.children():
+                fn_recursive_set_mem_eff(child)
+
+        for module in self.children():
+            if isinstance(module, torch.nn.Module):
+                fn_recursive_set_mem_eff(module)
+
+    def enable_xformers_memory_efficient_attention(self):
+        r"""
+        Enable memory efficient attention as implemented in xformers.
+
+        When this option is enabled, you should observe lower GPU memory usage and a potential speed up at inference
+        time. Speed up at training time is not guaranteed.
+
+        Warning: When Memory Efficient Attention and Sliced attention are both enabled, the Memory Efficient Attention
+        is used.
+        """
+        self.set_use_memory_efficient_attention_xformers(True)
+
+    def disable_xformers_memory_efficient_attention(self):
+        r"""
+        Disable memory efficient attention as implemented in xformers.
+        """
+        self.set_use_memory_efficient_attention_xformers(False)
+
     def save_pretrained(
         self,
         save_directory: Union[str, os.PathLike],
```
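The new `set_use_memory_efficient_attention_xformers` keeps no state on `ModelMixin` itself; it walks the module tree and forwards the flag to every descendant that exposes a hook of the same name, and `enable_`/`disable_xformers_memory_efficient_attention` are thin wrappers over it. A self-contained toy sketch of that propagation pattern (the `AttentionLike` and `ToyModel` classes are hypothetical, not diffusers code):

```python
# Toy demonstration of the recursive flag propagation used by ModelMixin:
# any nested child exposing the hook receives the call, no matter how deep.
import torch


class AttentionLike(torch.nn.Module):
    """Stand-in for a block that exposes the xformers hook."""

    def __init__(self):
        super().__init__()
        self.use_xformers = False

    def set_use_memory_efficient_attention_xformers(self, valid: bool) -> None:
        self.use_xformers = valid


class ToyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # The hook-bearing block is nested one level down inside a Sequential.
        self.block = torch.nn.Sequential(AttentionLike(), torch.nn.Linear(4, 4))

    def set_flag(self, valid: bool) -> None:
        # Same shape as the ModelMixin implementation above.
        def fn_recursive_set_mem_eff(module: torch.nn.Module):
            if hasattr(module, "set_use_memory_efficient_attention_xformers"):
                module.set_use_memory_efficient_attention_xformers(valid)
            for child in module.children():
                fn_recursive_set_mem_eff(child)

        for module in self.children():
            fn_recursive_set_mem_eff(module)


model = ToyModel()
model.set_flag(True)
assert model.block[0].use_xformers  # the flag reached the nested child
```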
```diff
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import math
-import warnings
 from dataclasses import dataclass
 from typing import Optional
 
@@ -447,16 +446,6 @@ class BasicTransformerBlock(nn.Module):
         # 3. Feed-forward
         self.norm3 = nn.LayerNorm(dim)
 
-        # if xformers is installed try to use memory_efficient_attention by default
-        if is_xformers_available():
-            try:
-                self.set_use_memory_efficient_attention_xformers(True)
-            except Exception as e:
-                warnings.warn(
-                    "Could not enable memory efficient attention. Make sure xformers is installed"
-                    f" correctly and a GPU is available: {e}"
-                )
-
     def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool):
         if not is_xformers_available():
             print("Here is how to install it")
```
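This is the other half of making xformers opt-in: `BasicTransformerBlock` previously tried to enable memory-efficient attention in its constructor whenever xformers was importable, so behavior silently depended on the environment. The constructor block and the `warnings` import it used are removed, leaving the explicit `ModelMixin` API above as the only switch. (The `print("Here is how to install it")` placeholder visible in the context lines is pre-existing and untouched by this diff.)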
```diff
@@ -46,7 +46,7 @@ if is_transformers_available() and is_torch_available():
     from .safety_checker import StableDiffusionSafetyChecker
 
 try:
-    if not (is_transformers_available() and is_torch_available()):
+    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
         raise OptionalDependencyNotAvailable()
 except OptionalDependencyNotAvailable:
     from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
```
```diff
@@ -7,7 +7,7 @@ from ...utils import (
 
 
 try:
-    if not (is_transformers_available() and is_torch_available()):
+    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
         raise OptionalDependencyNotAvailable()
 except OptionalDependencyNotAvailable:
     from ...utils.dummy_torch_and_transformers_objects import (
```
```diff
@@ -354,7 +354,20 @@ def requires_backends(obj, backends):
     if failed:
         raise ImportError("".join(failed))
 
-    if name in ["StableDiffusionDepth2ImgPipeline"] and is_transformers_version("<", "4.26.0.dev0"):
+    if name in [
+        "VersatileDiffusionTextToImagePipeline",
+        "VersatileDiffusionPipeline",
+        "VersatileDiffusionDualGuidedPipeline",
+        "StableDiffusionImageVariationPipeline",
+    ] and is_transformers_version("<", "4.25.0"):
+        raise ImportError(
+            f"You need to install `transformers>=4.25` in order to use {name}: \n```\n pip install"
+            " --upgrade transformers \n```"
+        )
+
+    if name in [
+        "StableDiffusionDepth2ImgPipeline",
+    ] and is_transformers_version("<", "4.26.0.dev0"):
         raise ImportError(
             f"You need to install `transformers` from 'main' in order to use {name}: \n```\n pip install"
             " git+https://github.com/huggingface/transformers \n```"
```
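`requires_backends` now raises a dedicated error for the Versatile Diffusion and image-variation pipelines on `transformers < 4.25.0`, separate from the pre-existing `4.26.0.dev0` gate for `StableDiffusionDepth2ImgPipeline`. As a rough sketch of what a check like `is_transformers_version(">=", "4.25.0")` amounts to (an assumed implementation, not the actual helper in `diffusers.utils.import_utils`):

```python
# Assumed implementation sketch of a transformers version gate; the real
# helper in diffusers may differ in detail.
import importlib.metadata

from packaging import version


def transformers_at_least(minimum: str) -> bool:
    """Return True if an installed transformers meets the minimum version."""
    try:
        installed = importlib.metadata.version("transformers")
    except importlib.metadata.PackageNotFoundError:
        return False
    return version.parse(installed) >= version.parse(minimum)


# Gate an optional pipeline the way the hunks above do:
if not transformers_at_least("4.25.0"):
    raise ImportError(
        "You need to install `transformers>=4.25` in order to use this pipeline: "
        "`pip install --upgrade transformers`"
    )
```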
```diff
@@ -30,6 +30,7 @@ from diffusers.utils import (
     torch_all_close,
     torch_device,
 )
+from diffusers.utils.import_utils import is_xformers_available
 from parameterized import parameterized
 
 from ..test_modeling_common import ModelTesterMixin
@@ -255,6 +256,20 @@ class UNet2DConditionModelTests(ModelTesterMixin, unittest.TestCase):
         inputs_dict = self.dummy_input
         return init_dict, inputs_dict
 
+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_enable_works(self):
+        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+        model = self.model_class(**init_dict)
+
+        model.enable_xformers_memory_efficient_attention()
+
+        assert (
+            model.mid_block.attentions[0].transformer_blocks[0].attn1._use_memory_efficient_attention_xformers
+        ), "xformers is not enabled"
+
     @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
     def test_gradient_checkpointing(self):
         # enable deterministic behavior for gradient checkpointing
```
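The new test builds a dummy UNet, calls `enable_xformers_memory_efficient_attention`, and asserts that the flag reached a deeply nested attention block, which exercises exactly the recursive propagation added in the `ModelMixin` hunk. It is skipped without CUDA and `xformers`, so on a suitable machine something like `pytest -k test_xformers_enable_works` should select it (the test file's path is not shown in this compare view).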