mirror of
https://github.com/huggingface/diffusers.git
synced 2026-01-18 17:45:52 +08:00
* Basic implementation of request scheduling * Basic editing in SD and Flux Pipelines * Small Fix * Fix * Update for more pipelines * Add examples/server-async * Add examples/server-async * Updated RequestScopedPipeline to handle a single tokenizer lock to avoid race conditions * Fix * Fix _TokenizerLockWrapper * Fix _TokenizerLockWrapper * Delete _TokenizerLockWrapper * Fix tokenizer * Update examples/server-async * Fix server-async * Optimizations in examples/server-async * We keep the implementation simple in examples/server-async * Update examples/server-async/README.md * Update examples/server-async/README.md for changes to tokenizer locks and backward-compatible retrieve_timesteps * The changes to the diffusers core have been undone and all logic is being moved to examples/server-async * Update examples/server-async/utils/* * Fix BaseAsyncScheduler * Rollback in the core of the diffusers * Update examples/server-async/README.md * Complete rollback of diffusers core files * Simple implementation of an asynchronous server compatible with SD3-3.5 and Flux Pipelines * Update examples/server-async/README.md * Fixed import errors in 'examples/server-async/serverasync.py' * Flux Pipeline Discard * Update examples/server-async/README.md * Apply style fixes --------- Co-authored-by: Sayak Paul <spsayakpaul@gmail.com> Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
49 lines
1.3 KiB
Python
49 lines
1.3 KiB
Python
import gc
|
|
import logging
|
|
import os
|
|
import tempfile
|
|
import uuid
|
|
|
|
import torch
|
|
|
|
|
|
# Module-level logger namespaced to this module, per the logging convention.
logger = logging.getLogger(__name__)
class Utils:
    """Helpers for persisting generated media and building their serving URLs.

    On construction, ensures ``<tempdir>/images`` and ``<tempdir>/videos``
    exist. ``save_image`` writes an image to the images directory as a PNG
    and returns the HTTP URL under which the server exposes it.
    """

    def __init__(self, host: str = "0.0.0.0", port: int = 8500):
        """Build the base service URL and create the media directories.

        Args:
            host: Interface the server is reachable on (only used for URLs).
            port: Port the server listens on.
        """
        self.service_url = f"http://{host}:{port}"

        # exist_ok=True avoids the check-then-create race of the original
        # os.path.exists() + os.makedirs() pair.
        self.image_dir = os.path.join(tempfile.gettempdir(), "images")
        os.makedirs(self.image_dir, exist_ok=True)

        self.video_dir = os.path.join(tempfile.gettempdir(), "videos")
        os.makedirs(self.video_dir, exist_ok=True)

    def save_image(self, image):
        """Save ``image`` as a PNG and return the URL it is served under.

        Args:
            image: A PIL image, or a ``torch.Tensor`` that is converted via
                torchvision's ``ToPILImage``; tensor values are clamped to
                [0, 1] and a leading batch dimension (if any) is squeezed.

        Returns:
            str: ``<service_url>/images/<filename>`` for the saved file.
        """
        # Best-effort move to CPU; deliberately swallow failures so objects
        # whose `.to` is not device-related still fall through unchanged.
        if hasattr(image, "to"):
            try:
                image = image.to("cpu")
            except Exception:
                pass

        if isinstance(image, torch.Tensor):
            # Local import keeps torchvision optional for non-tensor callers.
            from torchvision import transforms

            to_pil = transforms.ToPILImage()
            image = to_pil(image.squeeze(0).clamp(0, 1))

        # Short random name: first uuid4 group is enough to avoid collisions
        # for a scratch directory of generated previews.
        filename = "img" + str(uuid.uuid4()).split("-")[0] + ".png"
        image_path = os.path.join(self.image_dir, filename)
        logging.getLogger(__name__).info(f"Saving image to {image_path}")

        image.save(image_path, format="PNG", optimize=True)

        # Drop the (potentially large) image promptly and return GPU memory.
        del image
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        # BUG FIX: the original built the URL with os.path.join, which would
        # produce backslash separators on Windows; URLs always use "/".
        return f"{self.service_url}/images/{filename}"