Compare commits

...

7 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| Patrick von Platen | 8e44aa4b9f | fix transformers naming | 2023-02-20 09:39:01 +02:00 |
| Patrick von Platen | 126f32775b | make style | 2023-02-20 09:35:00 +02:00 |
| Haofan Wang | 92679c4851 | Update pipeline_utils.py (#2415) | 2023-02-20 09:34:54 +02:00 |
| Patrick von Platen | a41b043570 | Release: v0.13.1 | 2023-02-20 09:15:19 +02:00 |
| YiYi Xu | ba441fe534 | fix the get_indices function (#2418) (Co-authored-by: yiyixuxu <yixu310@gmail.com>) | 2023-02-20 09:13:49 +02:00 |
| Patrick von Platen | ef86993568 | Fix deprecation warning (#2426): deprecation warning should only hit at version 0.15 | 2023-02-20 09:13:42 +02:00 |
| Sayak Paul | dca9191fc6 | remove author names; add demo link to panorama (#2428) | 2023-02-20 09:13:14 +02:00 |
6 changed files with 16 additions and 9 deletions

docs/source/en/api/pipelines/panorama.mdx

@@ -25,12 +25,13 @@ Resources:

 * [Project Page](https://multidiffusion.github.io/).
 * [Paper](https://arxiv.org/abs/2302.08113).
 * [Original Code](https://github.com/omerbt/MultiDiffusion).
+* [Demo](https://huggingface.co/spaces/weizmannscience/MultiDiffusion).

 ## Available Pipelines:

-| Pipeline | Tasks |
-|---|---|
-| [StableDiffusionPanoramaPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py) | *Text-Guided Panorama View Generation* |
+| Pipeline | Tasks | Demo |
+|---|---|:---:|
+| [StableDiffusionPanoramaPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py) | *Text-Guided Panorama View Generation* | [🤗 Space](https://huggingface.co/spaces/weizmannscience/MultiDiffusion) |

 <!-- TODO: add Colab -->
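For orientation, a minimal usage sketch of the pipeline this page documents follows; the checkpoint and scheduler choice below match the common panorama example but are assumptions, not part of this diff.

```python
# Hedged sketch: text-guided panorama generation with StableDiffusionPanoramaPipeline.
# The checkpoint name and DDIMScheduler choice are illustrative assumptions.
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_ckpt = "stabilityai/stable-diffusion-2-base"  # assumed checkpoint
scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_ckpt, scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")

# MultiDiffusion denoises overlapping windows and fuses them into one wide image.
image = pipe("a photo of the dolomites", height=512, width=2048).images[0]
image.save("panorama.png")
```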

docs/source/en/api/pipelines/pix2pix_zero.mdx

@@ -14,7 +14,7 @@ specific language governing permissions and limitations under the License.

 ## Overview

-[Zero-shot Image-to-Image Translation](https://arxiv.org/abs/2302.03027) by Gaurav Parmar, Krishna Kumar Singh, Richard Zhang, Yijun Li, Jingwan Lu, and Jun-Yan Zhu.
+[Zero-shot Image-to-Image Translation](https://arxiv.org/abs/2302.03027).

 The abstract of the paper is the following:

setup.py

@@ -219,7 +219,7 @@ install_requires = [

 setup(
     name="diffusers",
-    version="0.13.0",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
+    version="0.13.1",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
     description="Diffusers",
     long_description=open("README.md", "r", encoding="utf-8").read(),
     long_description_content_type="text/markdown",
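The comment on `version` spells out the accepted release formats; a quick hedged check with the `packaging` library (already a diffusers dependency) shows which strings pass.

```python
# Sketch: checking version strings against the format named in the setup.py
# comment (x.y.z, x.y.z.dev0, or x.y.z.rc1 — dots yes, dashes no).
from packaging.version import InvalidVersion, Version

for candidate in ["0.13.1", "0.14.0.dev0", "0.14.0.rc1", "0.13.1.x"]:
    try:
        print(candidate, "->", Version(candidate))  # prints the normalized form
    except InvalidVersion:
        print(candidate, "-> rejected")
```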

src/diffusers/__init__.py

@@ -1,4 +1,4 @@
-__version__ = "0.13.0"
+__version__ = "0.13.1"

 from .configuration_utils import ConfigMixin
 from .utils import (

src/diffusers/pipelines/pipeline_utils.py

@@ -537,7 +537,7 @@ class DiffusionPipeline(ConfigMixin):

         if revision in DEPRECATED_REVISION_ARGS and version.parse(
             version.parse(__version__).base_version
-        ) >= version.parse("0.10.0"):
+        ) >= version.parse("0.15.0"):
             info = model_info(
                 pretrained_model_name_or_path,
                 use_auth_token=use_auth_token,
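A self-contained sketch of the gate above; the `DEPRECATED_REVISION_ARGS` values are an assumption for illustration, not taken from this diff.

```python
# Sketch of the version gate fixed above: the deprecation branch should only
# activate once the installed diffusers version reaches 0.15.0.
from packaging import version

__version__ = "0.13.1"  # the version released in this compare
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]  # assumed values, for illustration

def hits_deprecation_path(revision: str) -> bool:
    return revision in DEPRECATED_REVISION_ARGS and version.parse(
        version.parse(__version__).base_version
    ) >= version.parse("0.15.0")

print(hits_deprecation_path("fp16"))  # False: 0.13.1 < 0.15.0
print(hits_deprecation_path("main"))  # False: not a deprecated revision arg
```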
@@ -825,7 +825,12 @@ class DiffusionPipeline(ConfigMixin):
                 loading_kwargs["sess_options"] = sess_options

             is_diffusers_model = issubclass(class_obj, diffusers.ModelMixin)
-            transformers_version = version.parse(version.parse(transformers.__version__).base_version)
+
+            if is_transformers_available():
+                transformers_version = version.parse(version.parse(transformers.__version__).base_version)
+            else:
+                transformers_version = "N/A"
+
             is_transformers_model = (
                 is_transformers_available()
                 and issubclass(class_obj, PreTrainedModel)

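The fix guards the `transformers.__version__` lookup so pipelines still load when transformers is not installed. A standalone sketch of the same pattern follows; the helper name mirrors the diffusers util, but its implementation here is an assumption.

```python
# Sketch of the guarded version lookup from the hunk above.
# find_spec detects the package without importing it.
import importlib.util

from packaging import version

def is_transformers_available() -> bool:
    return importlib.util.find_spec("transformers") is not None

if is_transformers_available():
    import transformers

    transformers_version = version.parse(version.parse(transformers.__version__).base_version)
else:
    # Sentinel as in the diff; it is never compared against, because
    # is_transformers_model also requires is_transformers_available().
    transformers_version = "N/A"

print(transformers_version)
```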
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_attend_and_excite.py

@@ -47,6 +47,7 @@ EXAMPLE_DOC_STRING = """
         >>> # use get_indices function to find out indices of the tokens you want to alter
         >>> pipe.get_indices(prompt)
+        {0: '<|startoftext|>', 1: 'a</w>', 2: 'cat</w>', 3: 'and</w>', 4: 'a</w>', 5: 'frog</w>', 6: '<|endoftext|>'}
         >>> token_indices = [2, 5]
         >>> seed = 6141
@@ -662,7 +663,7 @@ class StableDiffusionAttendAndExcitePipeline(DiffusionPipeline):
     def get_indices(self, prompt: str) -> Dict[str, int]:
         """Utility function to list the indices of the tokens you wish to alter"""
         ids = self.tokenizer(prompt).input_ids
-        indices = {tok: i for tok, i in zip(self.tokenizer.convert_ids_to_tokens(ids), range(len(ids)))}
+        indices = {i: tok for tok, i in zip(self.tokenizer.convert_ids_to_tokens(ids), range(len(ids)))}
         return indices

     @torch.no_grad()
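Why the swap matters: keying the dict by token collapses repeated tokens into a single entry, silently dropping indices, while keying by index keeps every position. A hedged repro using the prompt from the docstring (the CLIP checkpoint is an assumed stand-in for the pipeline's tokenizer):

```python
# Demonstrates the bug fixed above: 'a</w>' occurs twice in the prompt,
# so the old {token: index} mapping loses one of its positions.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
ids = tokenizer("a cat and a frog").input_ids
tokens = tokenizer.convert_ids_to_tokens(ids)

buggy = {tok: i for tok, i in zip(tokens, range(len(ids)))}  # old mapping
fixed = {i: tok for tok, i in zip(tokens, range(len(ids)))}  # fixed mapping

print(len(buggy), len(fixed))  # 6 vs 7 — the duplicate 'a</w>' ate an index
print(fixed)
# {0: '<|startoftext|>', 1: 'a</w>', 2: 'cat</w>', 3: 'and</w>',
#  4: 'a</w>', 5: 'frog</w>', 6: '<|endoftext|>'}
```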