Compare commits

...

22 Commits

Author SHA1 Message Date
Daniel Gu
65597652a7 Remove unused index_type arg to preprocess_conditions 2026-02-16 02:14:28 +01:00
Daniel Gu
b4e7815306 make style and make quality 2026-02-16 01:49:18 +01:00
Daniel Gu
49ef4c5ba2 Duplicate video and audio position ids if using CFG 2026-02-16 01:48:27 +01:00
Daniel Gu
df2ca6ed22 Center crop LTX-2 conditions following original code 2026-02-16 01:33:14 +01:00
Daniel Gu
ca931c6416 Forward kwargs from preprocess/postprocess_video to preprocess/postprocess resp. 2026-02-16 01:17:52 +01:00
Daniel Gu
e8c5ee0c6e Merge branch 'main' into ltx2-add-condition-pipeline 2026-02-16 01:08:44 +01:00
Daniel Gu
1c120c6ad9 Migrate to Python 3.9+ style type annotations without explicit typing imports 2026-02-14 04:50:35 +01:00
Daniel Gu
1cdea99b8b make fix-copies 2026-02-14 04:27:05 +01:00
Daniel Gu
8ba350cb47 Merge branch 'main' into ltx2-add-condition-pipeline 2026-02-14 04:26:09 +01:00
dg845
83c8ae6b29 Apply suggestions from code review
Co-authored-by: Álvaro Somoza <asomoza@users.noreply.github.com>
2026-02-13 19:15:08 -08:00
Daniel Gu
98f74b2fe4 Improve comment on using the conditioning mask in denoising loop 2026-02-05 06:48:27 +01:00
Daniel Gu
33e6ec1f85 Put latent_idx_from_index logic inline 2026-02-05 06:37:56 +01:00
Daniel Gu
2e824f561a Remove support for image and video in __call__ 2026-02-05 06:30:42 +01:00
Daniel Gu
d39d89f0d6 Update LTX2ConditionPipeline example 2026-02-05 02:16:01 +01:00
Daniel Gu
45051e18f5 Rename LTX2VideoCondition image to frames 2026-02-05 02:05:31 +01:00
Sayak Paul
70dff16996 Merge branch 'main' into ltx2-add-condition-pipeline 2026-02-04 15:48:36 +05:30
Daniel Gu
e0bd6a07f7 make fix-copies 2026-02-04 06:52:04 +01:00
Daniel Gu
5577e08433 make style and make quality 2026-02-04 06:48:36 +01:00
Daniel Gu
5368d73f7e Blend denoising output and clean latents in sample space instead of velocity space 2026-02-04 05:47:38 +01:00
Daniel Gu
ed52c0d7cc Implement LTX-2-style general image conditioning 2026-02-03 09:48:38 +01:00
Daniel Gu
02c750b590 Fix pipeline import error 2026-02-03 08:27:42 +01:00
Daniel Gu
2cc7e116ef LTX2 condition pipeline initial commit 2026-01-30 08:16:16 +01:00
6 changed files with 1526 additions and 8 deletions

View File

@@ -569,6 +569,7 @@ else:
"LEditsPPPipelineStableDiffusionXL",
"LongCatImageEditPipeline",
"LongCatImagePipeline",
"LTX2ConditionPipeline",
"LTX2ImageToVideoPipeline",
"LTX2LatentUpsamplePipeline",
"LTX2Pipeline",
@@ -1324,6 +1325,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
LEditsPPPipelineStableDiffusionXL,
LongCatImageEditPipeline,
LongCatImagePipeline,
LTX2ConditionPipeline,
LTX2ImageToVideoPipeline,
LTX2LatentUpsamplePipeline,
LTX2Pipeline,

View File

@@ -292,7 +292,12 @@ else:
"LTXLatentUpsamplePipeline",
"LTXI2VLongMultiPromptPipeline",
]
_import_structure["ltx2"] = ["LTX2Pipeline", "LTX2ImageToVideoPipeline", "LTX2LatentUpsamplePipeline"]
_import_structure["ltx2"] = [
"LTX2Pipeline",
"LTX2ConditionPipeline",
"LTX2ImageToVideoPipeline",
"LTX2LatentUpsamplePipeline",
]
_import_structure["lumina"] = ["LuminaPipeline", "LuminaText2ImgPipeline"]
_import_structure["lumina2"] = ["Lumina2Pipeline", "Lumina2Text2ImgPipeline"]
_import_structure["lucy"] = ["LucyEditPipeline"]
@@ -745,7 +750,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
LTXLatentUpsamplePipeline,
LTXPipeline,
)
from .ltx2 import LTX2ImageToVideoPipeline, LTX2LatentUpsamplePipeline, LTX2Pipeline
from .ltx2 import LTX2ConditionPipeline, LTX2ImageToVideoPipeline, LTX2LatentUpsamplePipeline, LTX2Pipeline
from .lucy import LucyEditPipeline
from .lumina import LuminaPipeline, LuminaText2ImgPipeline
from .lumina2 import Lumina2Pipeline, Lumina2Text2ImgPipeline

View File

@@ -25,6 +25,7 @@ else:
_import_structure["connectors"] = ["LTX2TextConnectors"]
_import_structure["latent_upsampler"] = ["LTX2LatentUpsamplerModel"]
_import_structure["pipeline_ltx2"] = ["LTX2Pipeline"]
_import_structure["pipeline_ltx2_condition"] = ["LTX2ConditionPipeline"]
_import_structure["pipeline_ltx2_image2video"] = ["LTX2ImageToVideoPipeline"]
_import_structure["pipeline_ltx2_latent_upsample"] = ["LTX2LatentUpsamplePipeline"]
_import_structure["vocoder"] = ["LTX2Vocoder"]
@@ -40,6 +41,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
from .connectors import LTX2TextConnectors
from .latent_upsampler import LTX2LatentUpsamplerModel
from .pipeline_ltx2 import LTX2Pipeline
from .pipeline_ltx2_condition import LTX2ConditionPipeline
from .pipeline_ltx2_image2video import LTX2ImageToVideoPipeline
from .pipeline_ltx2_latent_upsample import LTX2LatentUpsamplePipeline
from .vocoder import LTX2Vocoder

File diff suppressed because it is too large Load Diff

View File

@@ -2117,6 +2117,21 @@ class LongCatImagePipeline(metaclass=DummyObject):
requires_backends(cls, ["torch", "transformers"])
class LTX2ConditionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class LTX2ImageToVideoPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]

View File

@@ -25,9 +25,9 @@ from .image_processor import VaeImageProcessor, is_valid_image, is_valid_image_i
class VideoProcessor(VaeImageProcessor):
r"""Simple video processor."""
def preprocess_video(self, video, height: int | None = None, width: int | None = None) -> torch.Tensor:
def preprocess_video(self, video, height: int | None = None, width: int | None = None, **kwargs) -> torch.Tensor:
r"""
Preprocesses input video(s).
Preprocesses input video(s). Keyword arguments will be forwarded to `VaeImageProcessor.preprocess`.
Args:
video (`list[PIL.Image]`, `list[list[PIL.Image]]`, `torch.Tensor`, `np.array`, `list[torch.Tensor]`, `list[np.array]`):
@@ -49,6 +49,10 @@ class VideoProcessor(VaeImageProcessor):
width (`int`, *optional*, defaults to `None`):
The width in preprocessed frames of the video. If `None`, will use `get_default_height_width()` to get
the default width.
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_frames, height, width)`:
A 5D tensor holding the batched channels-first video(s).
"""
if isinstance(video, list) and isinstance(video[0], np.ndarray) and video[0].ndim == 5:
warnings.warn(
@@ -79,7 +83,7 @@ class VideoProcessor(VaeImageProcessor):
"Input is in incorrect format. Currently, we only support numpy.ndarray, torch.Tensor, PIL.Image.Image"
)
video = torch.stack([self.preprocess(img, height=height, width=width) for img in video], dim=0)
video = torch.stack([self.preprocess(img, height=height, width=width, **kwargs) for img in video], dim=0)
# move the number of channels before the number of frames.
video = video.permute(0, 2, 1, 3, 4)
@@ -87,10 +91,11 @@ class VideoProcessor(VaeImageProcessor):
return video
def postprocess_video(
self, video: torch.Tensor, output_type: str = "np"
self, video: torch.Tensor, output_type: str = "np", **kwargs
) -> np.ndarray | torch.Tensor | list[PIL.Image.Image]:
r"""
Converts a video tensor to a list of frames for export.
Converts a video tensor to a list of frames for export. Keyword arguments will be forwarded to
`VaeImageProcessor.postprocess`.
Args:
video (`torch.Tensor`): The video as a tensor.
@@ -100,7 +105,7 @@ class VideoProcessor(VaeImageProcessor):
outputs = []
for batch_idx in range(batch_size):
batch_vid = video[batch_idx].permute(1, 0, 2, 3)
batch_output = self.postprocess(batch_vid, output_type)
batch_output = self.postprocess(batch_vid, output_type, **kwargs)
outputs.append(batch_output)
if output_type == "np":