Mirror of https://github.com/huggingface/diffusers.git
Synced 2026-03-03 15:20:37 +08:00

Compare commits: 8 commits (use-fixtur...main)
| Author | SHA1 | Date |
|---|---|---|
|  | bbbcdd87bd |  |
|  | 47e8faf3b9 |  |
|  | c2fdd2d048 |  |
|  | 84ff061b1d |  |
|  | 3fd14f1acf |  |
|  | e7fe4ce92f |  |
|  | 3d9085565b |  |
|  | 5b54496131 |  |
.github/workflows/benchmark.yml (vendored): 14 changed lines

@@ -62,20 +62,6 @@ jobs:
        with:
          name: benchmark_test_reports
          path: benchmarks/${{ env.BASE_PATH }}

      # TODO: enable this once the connection problem has been resolved.
      - name: Update benchmarking results to DB
        env:
          PGDATABASE: metrics
          PGHOST: ${{ secrets.DIFFUSERS_BENCHMARKS_PGHOST }}
          PGUSER: transformers_benchmarks
          PGPASSWORD: ${{ secrets.DIFFUSERS_BENCHMARKS_PGPASSWORD }}
          BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
        run: |
          git config --global --add safe.directory /__w/diffusers/diffusers
          commit_id=$GITHUB_SHA
          commit_msg=$(git show -s --format=%s "$commit_id" | cut -c1-70)
          cd benchmarks && python populate_into_db.py "$BRANCH_NAME" "$commit_id" "$commit_msg"

      - name: Report success status
        if: ${{ success() }}
benchmarks/populate_into_db.py (deleted)

@@ -1,166 +0,0 @@
import argparse
import os
import sys

import gpustat
import pandas as pd
import psycopg2
import psycopg2.extras
from psycopg2.extensions import register_adapter
from psycopg2.extras import Json


register_adapter(dict, Json)

FINAL_CSV_FILENAME = "collated_results.csv"
# https://github.com/huggingface/transformers/blob/593e29c5e2a9b17baec010e8dc7c1431fed6e841/benchmark/init_db.sql#L27
BENCHMARKS_TABLE_NAME = "benchmarks"
MEASUREMENTS_TABLE_NAME = "model_measurements"


def _init_benchmark(conn, branch, commit_id, commit_msg):
    gpu_stats = gpustat.GPUStatCollection.new_query()
    metadata = {"gpu_name": gpu_stats[0]["name"]}
    repository = "huggingface/diffusers"
    with conn.cursor() as cur:
        cur.execute(
            f"INSERT INTO {BENCHMARKS_TABLE_NAME} (repository, branch, commit_id, commit_message, metadata) VALUES (%s, %s, %s, %s, %s) RETURNING benchmark_id",
            (repository, branch, commit_id, commit_msg, metadata),
        )
        benchmark_id = cur.fetchone()[0]
        print(f"Initialised benchmark #{benchmark_id}")
    return benchmark_id


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "branch",
        type=str,
        help="The branch name on which the benchmarking is performed.",
    )
    parser.add_argument(
        "commit_id",
        type=str,
        help="The commit hash on which the benchmarking is performed.",
    )
    parser.add_argument(
        "commit_msg",
        type=str,
        help="The commit message associated with the commit, truncated to 70 characters.",
    )
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = parse_args()
    try:
        conn = psycopg2.connect(
            host=os.getenv("PGHOST"),
            database=os.getenv("PGDATABASE"),
            user=os.getenv("PGUSER"),
            password=os.getenv("PGPASSWORD"),
        )
        print("DB connection established successfully.")
    except Exception as e:
        print(f"Problem during DB init: {e}")
        sys.exit(1)

    try:
        benchmark_id = _init_benchmark(
            conn=conn,
            branch=args.branch,
            commit_id=args.commit_id,
            commit_msg=args.commit_msg,
        )
    except Exception as e:
        print(f"Problem during initializing benchmark: {e}")
        sys.exit(1)

    cur = conn.cursor()

    df = pd.read_csv(FINAL_CSV_FILENAME)

    # Helper to cast values (or None) given a dtype
    def _cast_value(val, dtype: str):
        if pd.isna(val):
            return None

        if dtype == "text":
            return str(val).strip()

        if dtype == "float":
            try:
                return float(val)
            except ValueError:
                return None

        if dtype == "bool":
            s = str(val).strip().lower()
            if s in ("true", "t", "yes", "1"):
                return True
            if s in ("false", "f", "no", "0"):
                return False
            if val in (1, 1.0):
                return True
            if val in (0, 0.0):
                return False
            return None

        return val

    try:
        rows_to_insert = []
        for _, row in df.iterrows():
            scenario = _cast_value(row.get("scenario"), "text")
            model_cls = _cast_value(row.get("model_cls"), "text")
            num_params_B = _cast_value(row.get("num_params_B"), "float")
            flops_G = _cast_value(row.get("flops_G"), "float")
            time_plain_s = _cast_value(row.get("time_plain_s"), "float")
            mem_plain_GB = _cast_value(row.get("mem_plain_GB"), "float")
            time_compile_s = _cast_value(row.get("time_compile_s"), "float")
            mem_compile_GB = _cast_value(row.get("mem_compile_GB"), "float")
            fullgraph = _cast_value(row.get("fullgraph"), "bool")
            mode = _cast_value(row.get("mode"), "text")

            # If "github_sha" column exists in the CSV, cast it; else default to None
            if "github_sha" in df.columns:
                github_sha = _cast_value(row.get("github_sha"), "text")
            else:
                github_sha = None

            measurements = {
                "scenario": scenario,
                "model_cls": model_cls,
                "num_params_B": num_params_B,
                "flops_G": flops_G,
                "time_plain_s": time_plain_s,
                "mem_plain_GB": mem_plain_GB,
                "time_compile_s": time_compile_s,
                "mem_compile_GB": mem_compile_GB,
                "fullgraph": fullgraph,
                "mode": mode,
                "github_sha": github_sha,
            }
            rows_to_insert.append((benchmark_id, measurements))

        # Batch-insert all rows
        insert_sql = f"""
        INSERT INTO {MEASUREMENTS_TABLE_NAME} (
            benchmark_id,
            measurements
        )
        VALUES (%s, %s);
        """

        psycopg2.extras.execute_batch(cur, insert_sql, rows_to_insert)
        conn.commit()

        cur.close()
        conn.close()
    except Exception as e:
        print(f"Exception: {e}")
        sys.exit(1)
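For reference, the deleted script consumed a `collated_results.csv` produced by the benchmark runs. A minimal sketch of a compatible file (the column names come from the `_cast_value` calls above; the values are illustrative, not real benchmark numbers):

```py
import pandas as pd

# One illustrative row; real files contain one row per benchmarked scenario.
pd.DataFrame(
    [
        {
            "scenario": "example-scenario",  # hypothetical name
            "model_cls": "ExampleModel",     # hypothetical class
            "num_params_B": 1.0,
            "flops_G": 10.0,
            "time_plain_s": 0.5,
            "mem_plain_GB": 2.0,
            "time_compile_s": 0.4,
            "mem_compile_GB": 2.0,
            "fullgraph": True,
            "mode": "default",
            "github_sha": None,  # optional column; the script tolerates its absence
        }
    ]
).to_csv("collated_results.csv", index=False)
```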
@@ -97,5 +97,32 @@ If the custom model inherits from the [`ModelMixin`] class, it gets access to th
> )
> ```

### Saving custom models

Use [`~ConfigMixin.register_for_auto_class`] to add the `auto_map` entry to `config.json` automatically when saving. This avoids having to manually edit the config file.

```py
# my_model.py
from diffusers import ModelMixin, ConfigMixin

class MyCustomModel(ModelMixin, ConfigMixin):
    ...

MyCustomModel.register_for_auto_class("AutoModel")

model = MyCustomModel(...)
model.save_pretrained("./my_model")
```

The saved `config.json` will include the `auto_map` field.

```json
{
  "auto_map": {
    "AutoModel": "my_model.MyCustomModel"
  }
}
```

> [!NOTE]
> Learn more about implementing custom models in the [Community components](../using-diffusers/custom_pipeline_overview#community-components) guide.
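Once saved (locally or on the Hub), the registered class can be loaded back through `AutoModel` with remote code enabled, as the `register_for_auto_class` docstring below describes. A minimal sketch, assuming the files from the example above live in `./my_model`:

```py
from diffusers import AutoModel

# trust_remote_code=True is required: the class is resolved from
# "my_model.MyCustomModel" via the auto_map entry in config.json.
model = AutoModel.from_pretrained("./my_model", trust_remote_code=True)
```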
@@ -107,6 +107,38 @@ class ConfigMixin:
    has_compatibles = False

    _deprecated_kwargs = []
    _auto_class = None

    @classmethod
    def register_for_auto_class(cls, auto_class="AutoModel"):
        """
        Register this class with the given auto class so that it can be loaded with `AutoModel.from_pretrained(...,
        trust_remote_code=True)`.

        When the config is saved, the resulting `config.json` will include an `auto_map` entry mapping the auto class
        to this class's module and class name.

        Args:
            auto_class (`str` or type, *optional*, defaults to `"AutoModel"`):
                The auto class to register this class with. Can be a string (e.g. `"AutoModel"`) or the class itself.
                Currently only `"AutoModel"` is supported.

        Example:

        ```python
        from diffusers import ModelMixin, ConfigMixin


        class MyCustomModel(ModelMixin, ConfigMixin): ...


        MyCustomModel.register_for_auto_class("AutoModel")
        ```
        """
        if auto_class != "AutoModel":
            raise ValueError(f"Only 'AutoModel' is supported, got '{auto_class}'.")

        cls._auto_class = auto_class

    def register_to_config(self, **kwargs):
        if self.config_name is None:

@@ -621,6 +653,12 @@ class ConfigMixin:
        # pop the `_pre_quantization_dtype` as torch.dtypes are not serializable.
        _ = config_dict.pop("_pre_quantization_dtype", None)

        if getattr(self, "_auto_class", None) is not None:
            module = self.__class__.__module__.split(".")[-1]
            auto_map = config_dict.get("auto_map", {})
            auto_map[self._auto_class] = f"{module}.{self.__class__.__name__}"
            config_dict["auto_map"] = auto_map

        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path: str | os.PathLike):
@@ -14,6 +14,7 @@
import importlib
import inspect
import os
import sys
import traceback
import warnings
from collections import OrderedDict

@@ -28,10 +29,16 @@ from tqdm.auto import tqdm
from typing_extensions import Self

from ..configuration_utils import ConfigMixin, FrozenDict
from ..pipelines.pipeline_loading_utils import _fetch_class_library_tuple, simple_get_class_obj
from ..pipelines.pipeline_loading_utils import (
    LOADABLE_CLASSES,
    _fetch_class_library_tuple,
    _unwrap_model,
    simple_get_class_obj,
)
from ..utils import PushToHubMixin, is_accelerate_available, logging
from ..utils.dynamic_modules_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ..utils.hub_utils import load_or_create_model_card, populate_model_card
from ..utils.torch_utils import is_compiled_module
from .components_manager import ComponentsManager
from .modular_pipeline_utils import (
    MODULAR_MODEL_CARD_TEMPLATE,
@@ -1826,44 +1833,136 @@ class ModularPipeline(ConfigMixin, PushToHubMixin):
        )
        return pipeline

    def save_pretrained(self, save_directory: str | os.PathLike, push_to_hub: bool = False, **kwargs):
    def save_pretrained(
        self,
        save_directory: str | os.PathLike,
        safe_serialization: bool = True,
        variant: str | None = None,
        max_shard_size: int | str | None = None,
        push_to_hub: bool = False,
        **kwargs,
    ):
        """
        Save the pipeline to a directory. It does not save components; you need to save them separately.
        Save the pipeline and all its components to a directory, so that it can be re-loaded using the
        [`~ModularPipeline.from_pretrained`] class method.

        Args:
            save_directory (`str` or `os.PathLike`):
                Path to the directory where the pipeline will be saved.
            push_to_hub (`bool`, optional):
                Whether to push the pipeline to the huggingface hub.
            **kwargs: Additional arguments passed to `save_config()` method
                Directory to save the pipeline to. Will be created if it doesn't exist.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
            variant (`str`, *optional*):
                If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
            max_shard_size (`int` or `str`, defaults to `None`):
                The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be smaller
                than this size. If expressed as a string, it needs to be digits followed by a unit (like `"5GB"`).
                If expressed as an integer, the unit is bytes.
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether to push the pipeline to the Hugging Face model hub after saving it.
            **kwargs: Additional keyword arguments:
                - `overwrite_modular_index` (`bool`, *optional*, defaults to `False`):
                    When saving a Modular Pipeline, its components in `modular_model_index.json` may reference repos
                    different from the destination repo. Setting this to `True` updates all component references in
                    `modular_model_index.json` so they point to the repo specified by `repo_id`.
                - `repo_id` (`str`, *optional*):
                    The repository ID to push the pipeline to. Defaults to the last component of `save_directory`.
                - `commit_message` (`str`, *optional*):
                    Commit message for the push to hub operation.
                - `private` (`bool`, *optional*):
                    Whether the repository should be private.
                - `create_pr` (`bool`, *optional*, defaults to `False`):
                    Whether to create a pull request instead of pushing directly.
                - `token` (`str`, *optional*):
                    The Hugging Face token to use for authentication.
        """
        overwrite_modular_index = kwargs.pop("overwrite_modular_index", False)
        repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            private = kwargs.pop("private", None)
            create_pr = kwargs.pop("create_pr", False)
            token = kwargs.pop("token", None)
            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
            update_model_card = kwargs.pop("update_model_card", False)
            repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id

            # Generate modular pipeline card content
            card_content = generate_modular_model_card_content(self.blocks)
        for component_name, component_spec in self._component_specs.items():
            if component_spec.default_creation_method != "from_pretrained":
                continue

            # Create a new empty model card and eventually tag it
            component = getattr(self, component_name, None)
            if component is None:
                continue

            model_cls = component.__class__
            if is_compiled_module(component):
                component = _unwrap_model(component)
                model_cls = component.__class__

            save_method_name = None
            for library_name, library_classes in LOADABLE_CLASSES.items():
                if library_name in sys.modules:
                    library = importlib.import_module(library_name)
                else:
                    logger.info(
                        f"{library_name} is not installed. Cannot save {component_name} as {library_classes} from {library_name}"
                    )
                    continue

                for base_class, save_load_methods in library_classes.items():
                    class_candidate = getattr(library, base_class, None)
                    if class_candidate is not None and issubclass(model_cls, class_candidate):
                        save_method_name = save_load_methods[0]
                        break
                if save_method_name is not None:
                    break

            if save_method_name is None:
                logger.warning(f"self.{component_name}={component} of type {type(component)} cannot be saved.")
                continue

            save_method = getattr(component, save_method_name)
            save_method_signature = inspect.signature(save_method)
            save_method_accept_safe = "safe_serialization" in save_method_signature.parameters
            save_method_accept_variant = "variant" in save_method_signature.parameters
            save_method_accept_max_shard_size = "max_shard_size" in save_method_signature.parameters

            save_kwargs = {}
            if save_method_accept_safe:
                save_kwargs["safe_serialization"] = safe_serialization
            if save_method_accept_variant:
                save_kwargs["variant"] = variant
            if save_method_accept_max_shard_size and max_shard_size is not None:
                save_kwargs["max_shard_size"] = max_shard_size

            component_save_path = os.path.join(save_directory, component_name)
            save_method(component_save_path, **save_kwargs)

            if component_name not in self.config:
                continue

            has_no_load_id = not hasattr(component, "_diffusers_load_id") or component._diffusers_load_id == "null"
            if overwrite_modular_index or has_no_load_id:
                library, class_name, component_spec_dict = self.config[component_name]
                component_spec_dict["pretrained_model_name_or_path"] = repo_id if push_to_hub else save_directory
                component_spec_dict["subfolder"] = component_name
                self.register_to_config(**{component_name: (library, class_name, component_spec_dict)})

        self.save_config(save_directory=save_directory)

        if push_to_hub:
            card_content = generate_modular_model_card_content(self.blocks)
            model_card = load_or_create_model_card(
                repo_id,
                token=token,
                is_pipeline=True,
                model_description=MODULAR_MODEL_CARD_TEMPLATE.format(**card_content),
                is_modular=True,
                update_model_card=update_model_card,
            )
            model_card = populate_model_card(model_card, tags=card_content["tags"])

            model_card.save(os.path.join(save_directory, "README.md"))

            # YiYi TODO: maybe order the json file to make it more readable: configs first, then components
            self.save_config(save_directory=save_directory)

        if push_to_hub:
            self._upload_folder(
                save_directory,
                repo_id,
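A usage sketch of the extended `save_pretrained` signature above; the directory, repo id, and token are placeholders:

```py
pipe.save_pretrained(
    "./my-modular-pipeline",                # hypothetical local directory
    safe_serialization=True,
    push_to_hub=True,
    repo_id="my-user/my-modular-pipeline",  # hypothetical repo id
    overwrite_modular_index=True,           # point all component references at the new repo
    token="<hf-token>",                     # placeholder
)
```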
@@ -2131,8 +2230,9 @@ class ModularPipeline(ConfigMixin, PushToHubMixin):
        ```

        Notes:
            - Components with trained weights should be loaded with `AutoModel.from_pretrained()` or
              `ComponentSpec.load()` so that loading specs are preserved for serialization.
            - Components loaded with `AutoModel.from_pretrained()` or `ComponentSpec.load()` will have
              loading specs preserved for serialization. Custom or locally loaded components without Hub references
              will have their `modular_model_index.json` entries updated automatically during `save_pretrained()`.
            - ConfigMixin objects without weights (e.g., schedulers, guiders) can be passed directly.
        """
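A sketch of the recommended loading path from the notes above, so the component carries a `_diffusers_load_id` and its loading spec survives serialization. The repo id is a placeholder, and the keyword form of `update_components` is an assumption:

```py
import torch
from diffusers import AutoModel

# Loading through AutoModel preserves the loading spec for modular_model_index.json.
unet = AutoModel.from_pretrained("some-org/some-repo", subfolder="unet", torch_dtype=torch.float32)
pipe.update_components(unet=unet)  # keyword usage is assumed, not confirmed by this diff
```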
@@ -2154,14 +2254,6 @@ class ModularPipeline(ConfigMixin, PushToHubMixin):
        new_component_spec = current_component_spec
        if hasattr(self, name) and getattr(self, name) is not None:
            logger.warning(f"ModularPipeline.update_components: setting {name} to None (spec unchanged)")
        elif current_component_spec.default_creation_method == "from_pretrained" and not (
            hasattr(component, "_diffusers_load_id") and component._diffusers_load_id is not None
        ):
            logger.warning(
                f"ModularPipeline.update_components: {name} has no valid _diffusers_load_id. "
                f"This will result in an empty loading spec; use ComponentSpec.load() for proper specs"
            )
            new_component_spec = ComponentSpec(name=name, type_hint=type(component))
        else:
            new_component_spec = ComponentSpec.from_component(name, component)
@@ -50,11 +50,7 @@ This modular pipeline is composed of the following blocks:

{components_description} {configs_section}

## Input/Output Specification

### Inputs {inputs_description}

### Outputs {outputs_description}
{io_specification_section}
"""
@@ -311,6 +307,12 @@ class ComponentSpec:
                f"`type_hint` is required when loading a single file model but is missing for component: {self.name}"
            )

        # `torch_dtype` is not an accepted parameter for tokenizers and processors.
        # As a result, it gets stored in `init_kwargs`, which are written to the config
        # during save. This causes JSON serialization to fail when saving the component.
        if self.type_hint is not None and not issubclass(self.type_hint, torch.nn.Module):
            kwargs.pop("torch_dtype", None)

        if self.type_hint is None:
            try:
                from diffusers import AutoModel

@@ -328,6 +330,12 @@ class ComponentSpec:
            else getattr(self.type_hint, "from_pretrained")
        )

        # `torch_dtype` is not an accepted parameter for tokenizers and processors.
        # As a result, it gets stored in `init_kwargs`, which are written to the config
        # during save. This causes JSON serialization to fail when saving the component.
        if not issubclass(self.type_hint, torch.nn.Module):
            kwargs.pop("torch_dtype", None)

        try:
            component = load_method(pretrained_model_name_or_path, **load_kwargs, **kwargs)
        except Exception as e:
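The comment above rests on a simple fact: `torch.dtype` values are not JSON serializable, so a `torch_dtype` left in a tokenizer's `init_kwargs` breaks config saving. A standalone repro:

```py
import json

import torch

try:
    json.dumps({"torch_dtype": torch.float16})
except TypeError as e:
    print(e)  # Object of type dtype is not JSON serializable
```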
@@ -799,6 +807,46 @@ def format_output_params(output_params, indent_level=4, max_line_length=115):
    return format_params(output_params, "Outputs", indent_level, max_line_length)


def format_params_markdown(params, header="Inputs"):
    """Format a list of InputParam or OutputParam objects as a markdown bullet-point list.

    Suitable for model cards rendered on Hugging Face Hub.

    Args:
        params: list of InputParam or OutputParam objects to format
        header: Header text (e.g. "Inputs" or "Outputs")

    Returns:
        A formatted markdown string, or an empty string if params is empty.
    """
    if not params:
        return ""

    def get_type_str(type_hint):
        if isinstance(type_hint, UnionType) or get_origin(type_hint) is Union:
            type_strs = [t.__name__ if hasattr(t, "__name__") else str(t) for t in get_args(type_hint)]
            return " | ".join(type_strs)
        return type_hint.__name__ if hasattr(type_hint, "__name__") else str(type_hint)

    lines = [f"**{header}:**\n"] if header else []
    for param in params:
        type_str = get_type_str(param.type_hint) if param.type_hint != Any else ""
        name = f"**{param.kwargs_type}" if param.name is None and param.kwargs_type is not None else param.name
        param_str = f"- `{name}` (`{type_str}`"

        if hasattr(param, "required") and not param.required:
            param_str += ", *optional*"
            if param.default is not None:
                param_str += f", defaults to `{param.default}`"
        param_str += ")"

        desc = param.description if param.description else "No description provided"
        param_str += f": {desc}"
        lines.append(param_str)

    return "\n".join(lines)
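A small usage sketch for `format_params_markdown`; `FakeParam` is a stand-in for `InputParam` that mimics only the attributes the function reads (`name`, `type_hint`, `required`, `default`, `description`, `kwargs_type`):

```py
from dataclasses import dataclass
from typing import Any


@dataclass
class FakeParam:  # illustrative stand-in, not the real InputParam
    name: str
    type_hint: Any = Any
    required: bool = True
    default: Any = None
    description: str = ""
    kwargs_type: str | None = None


print(format_params_markdown([FakeParam("prompt", str, description="Text prompt")], header="Inputs"))
# **Inputs:**
#
# - `prompt` (`str`): Text prompt
```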
def format_components(components, indent_level=4, max_line_length=115, add_empty_lines=True):
    """Format a list of ComponentSpec objects into a readable string representation.
@@ -1055,8 +1103,7 @@ def generate_modular_model_card_content(blocks) -> dict[str, Any]:
        - blocks_description: Detailed architecture of blocks
        - components_description: List of required components
        - configs_section: Configuration parameters section
        - inputs_description: Input parameters specification
        - outputs_description: Output parameters specification
        - io_specification_section: Input/Output specification (per-workflow or unified)
        - trigger_inputs_section: Conditional execution information
        - tags: List of relevant tags for the model card
    """

@@ -1075,15 +1122,6 @@ def generate_modular_model_card_content(blocks) -> dict[str, Any]:
        if block_desc:
            blocks_desc_parts.append(f" - {block_desc}")

        # add sub-blocks if any
        if hasattr(block, "sub_blocks") and block.sub_blocks:
            for sub_name, sub_block in block.sub_blocks.items():
                sub_class = sub_block.__class__.__name__
                sub_desc = sub_block.description.split("\n")[0] if getattr(sub_block, "description", "") else ""
                blocks_desc_parts.append(f" - *{sub_name}*: `{sub_class}`")
                if sub_desc:
                    blocks_desc_parts.append(f" - {sub_desc}")

    blocks_description = "\n".join(blocks_desc_parts) if blocks_desc_parts else "No blocks defined."

    components = getattr(blocks, "expected_components", [])

@@ -1109,63 +1147,76 @@ def generate_modular_model_card_content(blocks) -> dict[str, Any]:
    if configs_description:
        configs_section = f"\n\n## Configuration Parameters\n\n{configs_description}"

    inputs = blocks.inputs
    outputs = blocks.outputs
    # Branch on whether workflows are defined
    has_workflows = getattr(blocks, "_workflow_map", None) is not None

    # format inputs as markdown list
    inputs_parts = []
    required_inputs = [inp for inp in inputs if inp.required]
    optional_inputs = [inp for inp in inputs if not inp.required]
    if has_workflows:
        workflow_map = blocks._workflow_map
        parts = []

    if required_inputs:
        inputs_parts.append("**Required:**\n")
        for inp in required_inputs:
            if hasattr(inp.type_hint, "__name__"):
                type_str = inp.type_hint.__name__
            elif inp.type_hint is not None:
                type_str = str(inp.type_hint).replace("typing.", "")
            else:
                type_str = "Any"
            desc = inp.description or "No description provided"
            inputs_parts.append(f"- `{inp.name}` (`{type_str}`): {desc}")
        # If blocks overrides outputs (e.g. to return just "images" instead of all intermediates),
        # use that as the shared output for all workflows
        blocks_outputs = blocks.outputs
        blocks_intermediate = getattr(blocks, "intermediate_outputs", None)
        shared_outputs = (
            blocks_outputs if blocks_intermediate is not None and blocks_outputs != blocks_intermediate else None
        )

    if optional_inputs:
        if required_inputs:
            inputs_parts.append("")
        inputs_parts.append("**Optional:**\n")
        for inp in optional_inputs:
            if hasattr(inp.type_hint, "__name__"):
                type_str = inp.type_hint.__name__
            elif inp.type_hint is not None:
                type_str = str(inp.type_hint).replace("typing.", "")
            else:
                type_str = "Any"
            desc = inp.description or "No description provided"
            default_str = f", default: `{inp.default}`" if inp.default is not None else ""
            inputs_parts.append(f"- `{inp.name}` (`{type_str}`){default_str}: {desc}")
        parts.append("## Workflow Input Specification\n")

    inputs_description = "\n".join(inputs_parts) if inputs_parts else "No specific inputs defined."
        # Per-workflow details: show trigger inputs with full param descriptions
        for wf_name, trigger_inputs in workflow_map.items():
            trigger_input_names = set(trigger_inputs.keys())
            try:
                workflow_blocks = blocks.get_workflow(wf_name)
            except Exception:
                parts.append(f"<details>\n<summary><strong>{wf_name}</strong></summary>\n")
                parts.append("*Could not resolve workflow blocks.*\n")
                parts.append("</details>\n")
                continue

    # format outputs as markdown list
    outputs_parts = []
    for out in outputs:
        if hasattr(out.type_hint, "__name__"):
            type_str = out.type_hint.__name__
        elif out.type_hint is not None:
            type_str = str(out.type_hint).replace("typing.", "")
        else:
            type_str = "Any"
        desc = out.description or "No description provided"
        outputs_parts.append(f"- `{out.name}` (`{type_str}`): {desc}")
            wf_inputs = workflow_blocks.inputs
            # Show only trigger inputs with full parameter descriptions
            trigger_params = [p for p in wf_inputs if p.name in trigger_input_names]

    outputs_description = "\n".join(outputs_parts) if outputs_parts else "Standard pipeline outputs."
            parts.append(f"<details>\n<summary><strong>{wf_name}</strong></summary>\n")

    trigger_inputs_section = ""
    if hasattr(blocks, "trigger_inputs") and blocks.trigger_inputs:
        trigger_inputs_list = sorted([t for t in blocks.trigger_inputs if t is not None])
        if trigger_inputs_list:
            trigger_inputs_str = ", ".join(f"`{t}`" for t in trigger_inputs_list)
            trigger_inputs_section = f"""
            inputs_str = format_params_markdown(trigger_params, header=None)
            parts.append(inputs_str if inputs_str else "No additional inputs required.")
            parts.append("")

            parts.append("</details>\n")

        # Common Inputs & Outputs section (like non-workflow pipelines)
        all_inputs = blocks.inputs
        all_outputs = shared_outputs if shared_outputs is not None else blocks.outputs

        inputs_str = format_params_markdown(all_inputs, "Inputs")
        outputs_str = format_params_markdown(all_outputs, "Outputs")
        inputs_description = inputs_str if inputs_str else "No specific inputs defined."
        outputs_description = outputs_str if outputs_str else "Standard pipeline outputs."

        parts.append(f"\n## Input/Output Specification\n\n{inputs_description}\n\n{outputs_description}")

        io_specification_section = "\n".join(parts)
        # Suppress trigger_inputs_section when workflows are shown (it's redundant)
        trigger_inputs_section = ""
    else:
        # Unified I/O section (original behavior)
        inputs = blocks.inputs
        outputs = blocks.outputs
        inputs_str = format_params_markdown(inputs, "Inputs")
        outputs_str = format_params_markdown(outputs, "Outputs")
        inputs_description = inputs_str if inputs_str else "No specific inputs defined."
        outputs_description = outputs_str if outputs_str else "Standard pipeline outputs."
        io_specification_section = f"## Input/Output Specification\n\n{inputs_description}\n\n{outputs_description}"

        trigger_inputs_section = ""
        if hasattr(blocks, "trigger_inputs") and blocks.trigger_inputs:
            trigger_inputs_list = sorted([t for t in blocks.trigger_inputs if t is not None])
            if trigger_inputs_list:
                trigger_inputs_str = ", ".join(f"`{t}`" for t in trigger_inputs_list)
                trigger_inputs_section = f"""

### Conditional Execution

This pipeline contains blocks that are selected at runtime based on inputs:

@@ -1178,7 +1229,18 @@ This pipeline contains blocks that are selected at runtime based on inputs:
    if hasattr(blocks, "model_name") and blocks.model_name:
        tags.append(blocks.model_name)

    if hasattr(blocks, "trigger_inputs") and blocks.trigger_inputs:
    if has_workflows:
        # Derive tags from workflow names
        workflow_names = set(blocks._workflow_map.keys())
        if any("inpainting" in wf for wf in workflow_names):
            tags.append("inpainting")
        if any("image2image" in wf for wf in workflow_names):
            tags.append("image-to-image")
        if any("controlnet" in wf for wf in workflow_names):
            tags.append("controlnet")
        if any("text2image" in wf for wf in workflow_names):
            tags.append("text-to-image")
    elif hasattr(blocks, "trigger_inputs") and blocks.trigger_inputs:
        triggers = blocks.trigger_inputs
        if any(t in triggers for t in ["mask", "mask_image"]):
            tags.append("inpainting")

@@ -1206,8 +1268,7 @@ This pipeline uses a {block_count}-block architecture that can be customized and
        "blocks_description": blocks_description,
        "components_description": components_description,
        "configs_section": configs_section,
        "inputs_description": inputs_description,
        "outputs_description": outputs_description,
        "io_specification_section": io_specification_section,
        "trigger_inputs_section": trigger_inputs_section,
        "tags": tags,
    }
@@ -31,14 +31,18 @@ class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    Args:
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        trained_betas (`np.ndarray`, *optional*):
        trained_betas (`np.ndarray` or `List[float]`, *optional*):
            Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: np.ndarray | list[float] | None = None):
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        trained_betas: np.ndarray | list[float] | None = None,
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
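A short sketch of the widened `trained_betas` annotation: per the updated type hint, a plain Python list is accepted as well as an `np.ndarray`:

```py
import numpy as np
from diffusers import IPNDMScheduler

betas = np.linspace(1e-4, 2e-2, 1000)
scheduler = IPNDMScheduler(num_train_timesteps=1000, trained_betas=betas)
# A plain list is now an equally valid input:
scheduler = IPNDMScheduler(num_train_timesteps=1000, trained_betas=betas.tolist())
```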
@@ -56,21 +60,29 @@ class IPNDMScheduler(SchedulerMixin, ConfigMixin):
        self._begin_index = None

    @property
    def step_index(self):
    def step_index(self) -> int | None:
        """
        The index counter for the current timestep. It increases by 1 after each scheduler step.

        Returns:
            `int` or `None`:
                The index counter for the current timestep.
        """
        return self._step_index

    @property
    def begin_index(self):
    def begin_index(self) -> int | None:
        """
        The index for the first timestep. It should be set from the pipeline with the `set_begin_index` method.

        Returns:
            `int` or `None`:
                The index for the first timestep.
        """
        return self._begin_index

    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
    def set_begin_index(self, begin_index: int = 0):
    def set_begin_index(self, begin_index: int = 0) -> None:
        """
        Sets the begin index for the scheduler. This function should be run from the pipeline before inference.

@@ -169,7 +181,7 @@ class IPNDMScheduler(SchedulerMixin, ConfigMixin):
        Args:
            model_output (`torch.Tensor`):
                The direct output from the learned diffusion model.
            timestep (`int`):
            timestep (`int` or `torch.Tensor`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.Tensor`):
                A current instance of a sample created by the diffusion process.

@@ -228,7 +240,30 @@ class IPNDMScheduler(SchedulerMixin, ConfigMixin):
        """
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
    def _get_prev_sample(
        self,
        sample: torch.Tensor,
        timestep_index: int,
        prev_timestep_index: int,
        ets: torch.Tensor,
    ) -> torch.Tensor:
        """
        Predicts the previous sample based on the current sample, timestep indices, and running model outputs.

        Args:
            sample (`torch.Tensor`):
                The current sample.
            timestep_index (`int`):
                Index of the current timestep in the schedule.
            prev_timestep_index (`int`):
                Index of the previous timestep in the schedule.
            ets (`torch.Tensor`):
                The running sequence of model outputs.

        Returns:
            `torch.Tensor`:
                The predicted previous sample.
        """
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

@@ -240,5 +275,5 @@ class IPNDMScheduler(SchedulerMixin, ConfigMixin):

        return prev_sample

    def __len__(self):
    def __len__(self) -> int:
        return self.config.num_train_timesteps
@@ -299,7 +299,10 @@ def get_cached_module_file(
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if subfolder is not None:
        module_file_or_url = os.path.join(pretrained_model_name_or_path, subfolder, module_file)
    else:
        module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url

@@ -384,7 +387,11 @@ def get_cached_module_file(
        if not os.path.exists(submodule_path / module_folder):
            os.makedirs(submodule_path / module_folder)
        module_needed = f"{module_needed}.py"
        shutil.copyfile(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
        if subfolder is not None:
            source_path = os.path.join(pretrained_model_name_or_path, subfolder, module_needed)
        else:
            source_path = os.path.join(pretrained_model_name_or_path, module_needed)
        shutil.copyfile(source_path, submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
@@ -107,6 +107,7 @@ def load_or_create_model_card(
    widget: list[dict] | None = None,
    inference: bool | None = None,
    is_modular: bool = False,
    update_model_card: bool = False,
) -> ModelCard:
    """
    Loads or creates a model card.

@@ -133,6 +134,9 @@ def load_or_create_model_card(
            `load_or_create_model_card` from a training script.
        is_modular (`bool`, optional): Boolean flag to denote if the model card is for a modular pipeline.
            When True, uses model_description as-is without additional template formatting.
        update_model_card (`bool`, optional): When True, regenerates the model card content even if one
            already exists on the remote repo. Existing card metadata (tags, license, etc.) is preserved. Only
            supported for modular pipelines (i.e., `is_modular=True`).
    """
    if not is_jinja_available():
        raise ValueError(

@@ -141,9 +145,17 @@ def load_or_create_model_card(
            " To install it, please run `pip install Jinja2`."
        )

    if update_model_card and not is_modular:
        raise ValueError("`update_model_card=True` is only supported for modular pipelines (`is_modular=True`).")

    try:
        # Check if the model card is present on the remote repo
        model_card = ModelCard.load(repo_id_or_path, token=token)
        # For modular pipelines, regenerate card content when requested (preserve existing metadata)
        if update_model_card and is_modular and model_description is not None:
            existing_data = model_card.data
            model_card = ModelCard(model_description)
            model_card.data = existing_data
    except (EntryNotFoundError, RepositoryNotFoundError):
        # Otherwise create a model card from template
        if from_training:
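A hedged sketch of the new flag, following the signature and docstring above (the repo id and description are placeholders):

```py
from diffusers.utils.hub_utils import load_or_create_model_card

# Regenerate a modular pipeline card while keeping the existing card metadata
# (tags, license, ...). Passing update_model_card=True without is_modular=True raises.
card = load_or_create_model_card(
    "my-user/my-modular-pipeline",                 # hypothetical repo id
    model_description="Regenerated description",  # e.g. MODULAR_MODEL_CARD_TEMPLATE.format(**card_content)
    is_modular=True,
    update_model_card=True,
)
```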
@@ -1,9 +1,15 @@
import json
import os
import tempfile
import unittest
from unittest.mock import MagicMock, patch

import torch
from transformers import CLIPTextModel, LongformerModel

from diffusers import ConfigMixin
from diffusers.models import AutoModel, UNet2DConditionModel
from diffusers.models.modeling_utils import ModelMixin


class TestAutoModel(unittest.TestCase):

@@ -35,6 +41,45 @@ class TestAutoModel(unittest.TestCase):
        )
        assert isinstance(model, CLIPTextModel)

    def test_load_dynamic_module_from_local_path_with_subfolder(self):
        CUSTOM_MODEL_CODE = (
            "import torch\n"
            "from diffusers import ModelMixin, ConfigMixin\n"
            "from diffusers.configuration_utils import register_to_config\n"
            "\n"
            "class CustomModel(ModelMixin, ConfigMixin):\n"
            "    @register_to_config\n"
            "    def __init__(self, hidden_size=8):\n"
            "        super().__init__()\n"
            "        self.linear = torch.nn.Linear(hidden_size, hidden_size)\n"
            "\n"
            "    def forward(self, x):\n"
            "        return self.linear(x)\n"
        )

        with tempfile.TemporaryDirectory() as tmpdir:
            subfolder = "custom_model"
            model_dir = os.path.join(tmpdir, subfolder)
            os.makedirs(model_dir)

            with open(os.path.join(model_dir, "modeling.py"), "w") as f:
                f.write(CUSTOM_MODEL_CODE)

            config = {
                "_class_name": "CustomModel",
                "_diffusers_version": "0.0.0",
                "auto_map": {"AutoModel": "modeling.CustomModel"},
                "hidden_size": 8,
            }
            with open(os.path.join(model_dir, "config.json"), "w") as f:
                json.dump(config, f)

            torch.save({}, os.path.join(model_dir, "diffusion_pytorch_model.bin"))

            model = AutoModel.from_pretrained(tmpdir, subfolder=subfolder, trust_remote_code=True)
            assert model.__class__.__name__ == "CustomModel"
            assert model.config["hidden_size"] == 8


class TestAutoModelFromConfig(unittest.TestCase):
    @patch(

@@ -100,3 +145,51 @@ class TestAutoModelFromConfig(unittest.TestCase):
    def test_from_config_raises_on_none(self):
        with self.assertRaises(ValueError, msg="Please provide a `pretrained_model_name_or_path_or_dict`"):
            AutoModel.from_config(None)


class TestRegisterForAutoClass(unittest.TestCase):
    def test_register_for_auto_class_sets_attribute(self):
        class DummyModel(ModelMixin, ConfigMixin):
            config_name = "config.json"

        DummyModel.register_for_auto_class("AutoModel")
        self.assertEqual(DummyModel._auto_class, "AutoModel")

    def test_register_for_auto_class_rejects_unsupported(self):
        class DummyModel(ModelMixin, ConfigMixin):
            config_name = "config.json"

        with self.assertRaises(ValueError, msg="Only 'AutoModel' is supported"):
            DummyModel.register_for_auto_class("AutoPipeline")

    def test_auto_map_in_saved_config(self):
        class DummyModel(ModelMixin, ConfigMixin):
            config_name = "config.json"

        DummyModel.register_for_auto_class("AutoModel")
        model = DummyModel()

        with tempfile.TemporaryDirectory() as tmpdir:
            model.save_config(tmpdir)
            config_path = os.path.join(tmpdir, "config.json")
            with open(config_path, "r") as f:
                config = json.load(f)

        self.assertIn("auto_map", config)
        self.assertIn("AutoModel", config["auto_map"])
        module_name = DummyModel.__module__.split(".")[-1]
        self.assertEqual(config["auto_map"]["AutoModel"], f"{module_name}.DummyModel")

    def test_no_auto_map_without_register(self):
        class DummyModel(ModelMixin, ConfigMixin):
            config_name = "config.json"

        model = DummyModel()

        with tempfile.TemporaryDirectory() as tmpdir:
            model.save_config(tmpdir)
            config_path = os.path.join(tmpdir, "config.json")
            with open(config_path, "r") as f:
                config = json.load(f)

        self.assertNotIn("auto_map", config)
@@ -14,6 +14,7 @@
# limitations under the License.

import random
import tempfile

import numpy as np
import PIL

@@ -128,16 +129,18 @@ class TestFluxImg2ImgModularPipelineFast(ModularPipelineTesterMixin):

        return inputs

    def test_save_from_pretrained(self, tmp_path):
    def test_save_from_pretrained(self):
        pipes = []
        base_pipe = self.get_pipeline().to(torch_device)
        pipes.append(base_pipe)

        base_pipe.save_pretrained(tmp_path)
        pipe = ModularPipeline.from_pretrained(tmp_path).to(torch_device)
        pipe.load_components(torch_dtype=torch.float32)
        pipe.to(torch_device)
        pipe.image_processor = VaeImageProcessor(vae_scale_factor=2)
        with tempfile.TemporaryDirectory() as tmpdirname:
            base_pipe.save_pretrained(tmpdirname)

            pipe = ModularPipeline.from_pretrained(tmpdirname).to(torch_device)
            pipe.load_components(torch_dtype=torch.float32)
            pipe.to(torch_device)
            pipe.image_processor = VaeImageProcessor(vae_scale_factor=2)

        pipes.append(pipe)

@@ -209,16 +212,18 @@ class TestFluxKontextModularPipelineFast(ModularPipelineTesterMixin):

        return inputs

    def test_save_from_pretrained(self, tmp_path):
    def test_save_from_pretrained(self):
        pipes = []
        base_pipe = self.get_pipeline().to(torch_device)
        pipes.append(base_pipe)

        base_pipe.save_pretrained(tmp_path)
        pipe = ModularPipeline.from_pretrained(tmp_path).to(torch_device)
        pipe.load_components(torch_dtype=torch.float32)
        pipe.to(torch_device)
        pipe.image_processor = VaeImageProcessor(vae_scale_factor=2)
        with tempfile.TemporaryDirectory() as tmpdirname:
            base_pipe.save_pretrained(tmpdirname)

            pipe = ModularPipeline.from_pretrained(tmpdirname).to(torch_device)
            pipe.load_components(torch_dtype=torch.float32)
            pipe.to(torch_device)
            pipe.image_processor = VaeImageProcessor(vae_scale_factor=2)

        pipes.append(pipe)
@@ -1,5 +1,7 @@
import gc
import json
import os
import tempfile
from typing import Callable

import pytest

@@ -328,15 +330,16 @@ class ModularPipelineTesterMixin:

        assert torch.abs(image_slices[0] - image_slices[1]).max() < 1e-3

    def test_save_from_pretrained(self, tmp_path):
    def test_save_from_pretrained(self):
        pipes = []
        base_pipe = self.get_pipeline().to(torch_device)
        pipes.append(base_pipe)

        base_pipe.save_pretrained(tmp_path)
        pipe = ModularPipeline.from_pretrained(tmp_path).to(torch_device)
        pipe.load_components(torch_dtype=torch.float32)
        pipe.to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            base_pipe.save_pretrained(tmpdirname)
            pipe = ModularPipeline.from_pretrained(tmpdirname).to(torch_device)
            pipe.load_components(torch_dtype=torch.float32)
            pipe.to(torch_device)

        pipes.append(pipe)

@@ -348,31 +351,32 @@ class ModularPipelineTesterMixin:

        assert torch.abs(image_slices[0] - image_slices[1]).max() < 1e-3

    def test_modular_index_consistency(self, tmp_path):
    def test_modular_index_consistency(self):
        pipe = self.get_pipeline()
        components_spec = pipe._component_specs
        components = sorted(components_spec.keys())

        pipe.save_pretrained(tmp_path)
        index_file = tmp_path / "modular_model_index.json"
        assert index_file.exists()
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            index_file = os.path.join(tmpdir, "modular_model_index.json")
            assert os.path.exists(index_file)

        with open(index_file) as f:
            index_contents = json.load(f)
            with open(index_file) as f:
                index_contents = json.load(f)

        compulsory_keys = {"_blocks_class_name", "_class_name", "_diffusers_version"}
        for k in compulsory_keys:
            assert k in index_contents
            compulsory_keys = {"_blocks_class_name", "_class_name", "_diffusers_version"}
            for k in compulsory_keys:
                assert k in index_contents

        to_check_attrs = {"pretrained_model_name_or_path", "revision", "subfolder"}
        for component in components:
            spec = components_spec[component]
            for attr in to_check_attrs:
                if getattr(spec, "pretrained_model_name_or_path", None) is not None:
                    for attr in to_check_attrs:
                        assert component in index_contents, f"{component} should be present in index but isn't."
                        attr_value_from_index = index_contents[component][2][attr]
                        assert getattr(spec, attr) == attr_value_from_index
            to_check_attrs = {"pretrained_model_name_or_path", "revision", "subfolder"}
            for component in components:
                spec = components_spec[component]
                for attr in to_check_attrs:
                    if getattr(spec, "pretrained_model_name_or_path", None) is not None:
                        for attr in to_check_attrs:
                            assert component in index_contents, f"{component} should be present in index but isn't."
                            attr_value_from_index = index_contents[component][2][attr]
                            assert getattr(spec, attr) == attr_value_from_index

    def test_workflow_map(self):
        blocks = self.pipeline_blocks_class()
@@ -479,8 +483,7 @@ class TestModularModelCardContent:
        "blocks_description",
        "components_description",
        "configs_section",
        "inputs_description",
        "outputs_description",
        "io_specification_section",
        "trigger_inputs_section",
        "tags",
    ]

@@ -577,18 +580,19 @@ class TestModularModelCardContent:
        blocks = self.create_mock_blocks(inputs=inputs)
        content = generate_modular_model_card_content(blocks)

        assert "**Required:**" in content["inputs_description"]
        assert "**Optional:**" in content["inputs_description"]
        assert "prompt" in content["inputs_description"]
        assert "num_steps" in content["inputs_description"]
        assert "default: `50`" in content["inputs_description"]
        io_section = content["io_specification_section"]
        assert "**Inputs:**" in io_section
        assert "prompt" in io_section
        assert "num_steps" in io_section
        assert "*optional*" in io_section
        assert "defaults to `50`" in io_section

    def test_inputs_description_empty(self):
        """Test handling of pipelines without specific inputs."""
        blocks = self.create_mock_blocks(inputs=[])
        content = generate_modular_model_card_content(blocks)

        assert "No specific inputs defined" in content["inputs_description"]
        assert "No specific inputs defined" in content["io_specification_section"]

    def test_outputs_description_formatting(self):
        """Test that outputs are correctly formatted."""

@@ -598,15 +602,16 @@ class TestModularModelCardContent:
        blocks = self.create_mock_blocks(outputs=outputs)
        content = generate_modular_model_card_content(blocks)

        assert "images" in content["outputs_description"]
        assert "Generated images" in content["outputs_description"]
        io_section = content["io_specification_section"]
        assert "images" in io_section
        assert "Generated images" in io_section

    def test_outputs_description_empty(self):
        """Test handling of pipelines without specific outputs."""
        blocks = self.create_mock_blocks(outputs=[])
        content = generate_modular_model_card_content(blocks)

        assert "Standard pipeline outputs" in content["outputs_description"]
        assert "Standard pipeline outputs" in content["io_specification_section"]

    def test_trigger_inputs_section_with_triggers(self):
        """Test that trigger inputs section is generated when present."""

@@ -624,35 +629,6 @@ class TestModularModelCardContent:

        assert content["trigger_inputs_section"] == ""

    def test_blocks_description_with_sub_blocks(self):
        """Test that blocks with sub-blocks are correctly described."""

        class MockBlockWithSubBlocks:
            def __init__(self):
                self.__class__.__name__ = "ParentBlock"
                self.description = "Parent block"
                self.sub_blocks = {
                    "child1": self.create_child_block("ChildBlock1", "Child 1 description"),
                    "child2": self.create_child_block("ChildBlock2", "Child 2 description"),
                }

            def create_child_block(self, name, desc):
                class ChildBlock:
                    def __init__(self):
                        self.__class__.__name__ = name
                        self.description = desc

                return ChildBlock()

        blocks = self.create_mock_blocks()
        blocks.sub_blocks["parent"] = MockBlockWithSubBlocks()

        content = generate_modular_model_card_content(blocks)

        assert "parent" in content["blocks_description"]
        assert "child1" in content["blocks_description"]
        assert "child2" in content["blocks_description"]

    def test_model_description_includes_block_count(self):
        """Test that model description includes the number of blocks."""
        blocks = self.create_mock_blocks(num_blocks=5)

@@ -726,6 +702,82 @@ class TestLoadComponentsSkipBehavior:
        assert not hasattr(pipe, "test_component") or pipe.test_component is None


class TestCustomModelSavePretrained:
    def test_save_pretrained_updates_index_for_local_model(self, tmp_path):
        """When a component without _diffusers_load_id (custom/local model) is saved,
        modular_model_index.json should point to the save directory."""
        import json

        pipe = ModularPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-xl-pipe")
        pipe.load_components(torch_dtype=torch.float32)

        pipe.unet._diffusers_load_id = "null"

        save_dir = str(tmp_path / "my-pipeline")
        pipe.save_pretrained(save_dir)

        with open(os.path.join(save_dir, "modular_model_index.json")) as f:
            index = json.load(f)

        _library, _cls, unet_spec = index["unet"]
        assert unet_spec["pretrained_model_name_or_path"] == save_dir
        assert unet_spec["subfolder"] == "unet"

        _library, _cls, vae_spec = index["vae"]
        assert vae_spec["pretrained_model_name_or_path"] == "hf-internal-testing/tiny-stable-diffusion-xl-pipe"

    def test_save_pretrained_roundtrip_with_local_model(self, tmp_path):
        """A pipeline with a custom/local model should be saveable and re-loadable with identical outputs."""
        pipe = ModularPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-xl-pipe")
        pipe.load_components(torch_dtype=torch.float32)

        pipe.unet._diffusers_load_id = "null"

        original_state_dict = pipe.unet.state_dict()

        save_dir = str(tmp_path / "my-pipeline")
        pipe.save_pretrained(save_dir)

        loaded_pipe = ModularPipeline.from_pretrained(save_dir)
        loaded_pipe.load_components(torch_dtype=torch.float32)

        assert loaded_pipe.unet is not None
        assert loaded_pipe.unet.__class__.__name__ == pipe.unet.__class__.__name__

        loaded_state_dict = loaded_pipe.unet.state_dict()
        assert set(original_state_dict.keys()) == set(loaded_state_dict.keys())
        for key in original_state_dict:
            assert torch.equal(original_state_dict[key], loaded_state_dict[key]), f"Mismatch in {key}"

    def test_save_pretrained_overwrite_modular_index(self, tmp_path):
        """With overwrite_modular_index=True, all component references should point to the save directory."""
        import json

        pipe = ModularPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-xl-pipe")
        pipe.load_components(torch_dtype=torch.float32)

        save_dir = str(tmp_path / "my-pipeline")
        pipe.save_pretrained(save_dir, overwrite_modular_index=True)

        with open(os.path.join(save_dir, "modular_model_index.json")) as f:
            index = json.load(f)

        for component_name in ["unet", "vae", "text_encoder", "text_encoder_2"]:
            if component_name not in index:
                continue
            _library, _cls, spec = index[component_name]
            assert spec["pretrained_model_name_or_path"] == save_dir, (
                f"{component_name} should point to save dir but got {spec['pretrained_model_name_or_path']}"
            )
            assert spec["subfolder"] == component_name

        loaded_pipe = ModularPipeline.from_pretrained(save_dir)
        loaded_pipe.load_components(torch_dtype=torch.float32)

        assert loaded_pipe.unet is not None
        assert loaded_pipe.vae is not None


class TestModularPipelineInitFallback:
    """Test that ModularPipeline.__init__ falls back to default_blocks_name when
    _blocks_class_name is a base class (e.g. SequentialPipelineBlocks saved by from_blocks_dict)."""
@@ -14,6 +14,7 @@

import json
import os
import tempfile
from collections import deque
from typing import List

@@ -152,24 +153,25 @@ class TestModularCustomBlocks:
        output_prompt = output.values["output_prompt"]
        assert output_prompt.startswith("Modular diffusers + ")

    def test_custom_block_saving_loading(self, tmp_path):
    def test_custom_block_saving_loading(self):
        custom_block = DummyCustomBlockSimple()

        custom_block.save_pretrained(tmp_path)
        assert any("modular_config.json" in k for k in os.listdir(tmp_path))
        with tempfile.TemporaryDirectory() as tmpdir:
            custom_block.save_pretrained(tmpdir)
            assert any("modular_config.json" in k for k in os.listdir(tmpdir))

        with open(os.path.join(tmp_path, "modular_config.json"), "r") as f:
            config = json.load(f)
        auto_map = config["auto_map"]
        assert auto_map == {"ModularPipelineBlocks": "test_modular_pipelines_custom_blocks.DummyCustomBlockSimple"}
            with open(os.path.join(tmpdir, "modular_config.json"), "r") as f:
                config = json.load(f)
            auto_map = config["auto_map"]
            assert auto_map == {"ModularPipelineBlocks": "test_modular_pipelines_custom_blocks.DummyCustomBlockSimple"}

        # For now, the Python script that implements the custom block has to be manually pushed to the Hub.
        # This is why we have to separately save the Python script here.
        code_path = os.path.join(tmp_path, "test_modular_pipelines_custom_blocks.py")
        with open(code_path, "w") as f:
            f.write(CODE_STR)
            # For now, the Python script that implements the custom block has to be manually pushed to the Hub.
            # This is why we have to separately save the Python script here.
            code_path = os.path.join(tmpdir, "test_modular_pipelines_custom_blocks.py")
            with open(code_path, "w") as f:
                f.write(CODE_STR)

        loaded_custom_block = ModularPipelineBlocks.from_pretrained(tmp_path, trust_remote_code=True)
            loaded_custom_block = ModularPipelineBlocks.from_pretrained(tmpdir, trust_remote_code=True)

        pipe = loaded_custom_block.init_pipeline()
        prompt = "Diffusers is nice"
@@ -74,7 +74,7 @@ if is_torchao_available():

@require_torch
@require_torch_accelerator
@require_torchao_version_greater_or_equal("0.7.0")
@require_torchao_version_greater_or_equal("0.14.0")
class TorchAoConfigTest(unittest.TestCase):
    def test_to_dict(self):
        """

@@ -132,7 +132,7 @@ class TorchAoConfigTest(unittest.TestCase):
# Slices for these tests have been obtained on our aws-g6e-xlarge-plus runners
@require_torch
@require_torch_accelerator
@require_torchao_version_greater_or_equal("0.7.0")
@require_torchao_version_greater_or_equal("0.14.0")
class TorchAoTest(unittest.TestCase):
    def tearDown(self):
        gc.collect()

@@ -587,7 +587,7 @@ class TorchAoTest(unittest.TestCase):
# Slices for these tests have been obtained on our aws-g6e-xlarge-plus runners
@require_torch
@require_torch_accelerator
@require_torchao_version_greater_or_equal("0.7.0")
@require_torchao_version_greater_or_equal("0.14.0")
class TorchAoSerializationTest(unittest.TestCase):
    model_name = "hf-internal-testing/tiny-flux-pipe"

@@ -698,23 +698,22 @@ class TorchAoSerializationTest(unittest.TestCase):
        self._check_serialization_expected_slice(quant_method, quant_method_kwargs, expected_slice, device)


@require_torchao_version_greater_or_equal("0.7.0")
@require_torchao_version_greater_or_equal("0.14.0")
class TorchAoCompileTest(QuantCompileTests, unittest.TestCase):
    @property
    def quantization_config(self):
        return PipelineQuantizationConfig(
            quant_mapping={
                "transformer": TorchAoConfig(quant_type="int8_weight_only"),
            },
            quant_mapping={"transformer": TorchAoConfig(Int8WeightOnlyConfig())},
        )

    @unittest.skip(
        "Changing the device of AQT tensor with module._apply (called from doing module.to() in accelerate) does not work "
        "when compiling."
    )
    def test_torch_compile_with_cpu_offload(self):
        pipe = self._init_pipeline(self.quantization_config, torch.bfloat16)
        pipe.enable_model_cpu_offload()
        # No compilation because it fails with:
        # RuntimeError: _apply(): Couldn't swap Linear.weight
        super().test_torch_compile_with_cpu_offload()

        # small resolutions to ensure speedy execution.
        pipe("a dog", num_inference_steps=2, max_sequence_length=16, height=256, width=256)

    @parameterized.expand([False, True])
    @unittest.skip(

@@ -745,7 +744,7 @@ class TorchAoCompileTest(QuantCompileTests, unittest.TestCase):
# Slices for these tests have been obtained on our aws-g6e-xlarge-plus runners
@require_torch
@require_torch_accelerator
@require_torchao_version_greater_or_equal("0.7.0")
@require_torchao_version_greater_or_equal("0.14.0")
@slow
@nightly
class SlowTorchAoTests(unittest.TestCase):

@@ -907,7 +906,7 @@ class SlowTorchAoTests(unittest.TestCase):

@require_torch
@require_torch_accelerator
@require_torchao_version_greater_or_equal("0.7.0")
@require_torchao_version_greater_or_equal("0.14.0")
@slow
@nightly
class SlowTorchAoPreserializedModelTests(unittest.TestCase):