Mirror of https://github.com/huggingface/diffusers.git
Synced 2025-12-26 22:34:48 +08:00

Compare commits (2 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | c1b378db69 |  |
|  | b50a9ae383 |  |
````diff
@@ -123,7 +123,7 @@ The class provides functionality to compute previous image according to alpha, b

 **With `pip`**

 ```bash
-pip install --upgrade diffusers # should install diffusers 0.2.0
+pip install --upgrade diffusers # should install diffusers 0.2.1
 ```

 **With `conda`**
````
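Assuming the upgrade succeeds, the installed version can be confirmed from Python; a quick sanity check (not part of the diff itself):

```bash
# prints diffusers.__version__, which this release bumps to 0.2.1
python -c "import diffusers; print(diffusers.__version__)"
```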
setup.py (2 lines changed)
```diff
@@ -181,7 +181,7 @@ install_requires = [

 setup(
     name="diffusers",
-    version="0.2.0",
+    version="0.2.1",
     description="Diffusers",
     long_description=open("README.md", "r", encoding="utf-8").read(),
     long_description_content_type="text/markdown",
```
```diff
@@ -4,7 +4,7 @@
 from .utils import is_inflect_available, is_scipy_available, is_transformers_available, is_unidecode_available


-__version__ = "0.2.0"
+__version__ = "0.2.1"

 from .modeling_utils import ModelMixin
 from .models import AutoencoderKL, UNet2DConditionModel, UNet2DModel, VQModel
```
```diff
@@ -96,6 +96,10 @@ class StableDiffusionPipeline(DiffusionPipeline):

         self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)

+        # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
+        if isinstance(self.scheduler, LMSDiscreteScheduler):
+            latents = latents * self.scheduler.sigmas[0]
+
         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
         # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
@@ -105,10 +109,6 @@ class StableDiffusionPipeline(DiffusionPipeline):
         if accepts_eta:
             extra_step_kwargs["eta"] = eta

-        self.scheduler.set_timesteps(num_inference_steps)
-        if isinstance(self.scheduler, LMSDiscreteScheduler):
-            latents = latents * self.scheduler.sigmas[0]
-
         for i, t in tqdm(enumerate(self.scheduler.timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
```
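The reordering above is the substantive fix: the duplicate `set_timesteps` call is removed, and the sigma scaling moves up next to the remaining one. k-diffusion-style samplers start from noise at the largest sigma, so the unit-variance Gaussian latents must be rescaled by `sigmas[0]` before the denoising loop. A minimal standalone sketch of the two patterns in this hunk, assuming the diffusers 0.2.1 API; the scheduler settings and latent shape are illustrative, not the pipeline's own values:

```python
import inspect

import torch
from diffusers import LMSDiscreteScheduler

# illustrative setup: default scheduler config and a typical latent shape
scheduler = LMSDiscreteScheduler(tensor_format="pt")
scheduler.set_timesteps(50)  # populates scheduler.sigmas for the chosen step count

latents = torch.randn(1, 4, 64, 64)  # unit-variance Gaussian noise

# k-diffusion-style samplers expect the initial sample at the largest noise
# level, so the latents are scaled by the first (largest) sigma
if isinstance(scheduler, LMSDiscreteScheduler):
    latents = latents * scheduler.sigmas[0]

# not every scheduler's step() accepts eta, so the pipeline inspects the
# signature instead of passing it unconditionally
extra_step_kwargs = {}
accepts_eta = "eta" in set(inspect.signature(scheduler.step).parameters.keys())
if accepts_eta:
    extra_step_kwargs["eta"] = 0.0  # only DDIM actually uses eta
```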
```diff
@@ -36,8 +36,8 @@ class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
         tensor_format="pt",
     ):
         """
-        Linear Multistep Scheduler for discrete beta schedules.
-        Based on the original k-diffusion implementation by Katherine Crowson:
+        Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by
+        Katherine Crowson:
         https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181
         """
```
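The docstring change above only reflows the text onto fewer lines; the scheduler's behavior is untouched. For context, this is roughly how an LMS scheduler was swapped into the pipeline in this era: a sketch assuming access to a Stable Diffusion checkpoint such as `CompVis/stable-diffusion-v1-4` (gated behind an auth token at the time) and constructor values taken from the era's examples, none of which this diff specifies:

```python
from diffusers import LMSDiscreteScheduler, StableDiffusionPipeline

# constructor values mirror the era's Stable Diffusion examples; they are
# assumptions here, not something this diff pins down
lms = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    scheduler=lms,        # overrides the checkpoint's default scheduler
    use_auth_token=True,  # the v1-4 weights required accepting a license
)

# with the fix above, the pipeline itself now scales the initial latents by
# sigmas[0] whenever it sees an LMSDiscreteScheduler
image = pipe("a photograph of an astronaut riding a horse")["sample"][0]  # 0.2.x-era dict output
```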