From 26e0f531a7ae35ef16be9bc61690ec0bbb53b525 Mon Sep 17 00:00:00 2001
From: hlky
Date: Wed, 19 Feb 2025 08:19:56 +0000
Subject: [PATCH 1/8] Rename Lumina(2)Text2ImgPipeline -> Lumina(2)Pipeline

---
 docs/source/en/api/pipelines/lumina.md | 14 ++++-----
 docs/source/en/api/pipelines/lumina2.md | 12 ++++----
 scripts/convert_lumina_to_diffusers.py | 4 +--
 src/diffusers/__init__.py | 4 +++
 src/diffusers/pipelines/__init__.py | 8 ++---
 src/diffusers/pipelines/auto_pipeline.py | 8 ++---
 src/diffusers/pipelines/lumina/__init__.py | 4 +--
 .../pipelines/lumina/pipeline_lumina.py | 14 +++++++--
 src/diffusers/pipelines/lumina2/__init__.py | 4 +--
 .../pipelines/lumina2/pipeline_lumina2.py | 14 +++++++--
 .../dummy_torch_and_transformers_objects.py | 30 +++++++++++++++++++
 tests/pipelines/lumina/test_lumina_nextdit.py | 10 +++----
 .../lumina2/test_pipeline_lumina2.py | 6 ++--
 13 files changed, 91 insertions(+), 41 deletions(-)

diff --git a/docs/source/en/api/pipelines/lumina.md b/docs/source/en/api/pipelines/lumina.md
index 1967e85f173a..ce5cf8b103cc 100644
--- a/docs/source/en/api/pipelines/lumina.md
+++ b/docs/source/en/api/pipelines/lumina.md
@@ -58,10 +58,10 @@ Use [`torch.compile`](https://huggingface.co/docs/diffusers/main/en/tutorials/fa
First, load the pipeline:

```python
-from diffusers import LuminaText2ImgPipeline
+from diffusers import LuminaPipeline
import torch

-pipeline = LuminaText2ImgPipeline.from_pretrained(
+pipeline = LuminaPipeline.from_pretrained(
"Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16
).to("cuda")
```
@@ -86,11 +86,11 @@ image = pipeline(prompt="Upper body of a young woman in a Victorian-era outfit w

Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have a varying impact on image quality depending on the model.

-Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [`LuminaText2ImgPipeline`] for inference with bitsandbytes.
+Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [`LuminaPipeline`] for inference with bitsandbytes.

```py import torch -from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, Transformer2DModel, LuminaText2ImgPipeline +from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, Transformer2DModel, LuminaPipeline from transformers import BitsAndBytesConfig as BitsAndBytesConfig, T5EncoderModel quant_config = BitsAndBytesConfig(load_in_8bit=True) @@ -109,7 +109,7 @@ transformer_8bit = Transformer2DModel.from_pretrained( torch_dtype=torch.float16, ) -pipeline = LuminaText2ImgPipeline.from_pretrained( +pipeline = LuminaPipeline.from_pretrained( "Alpha-VLLM/Lumina-Next-SFT-diffusers", text_encoder=text_encoder_8bit, transformer=transformer_8bit, @@ -122,9 +122,9 @@ image = pipeline(prompt).images[0] image.save("lumina.png") ``` -## LuminaText2ImgPipeline +## LuminaPipeline -[[autodoc]] LuminaText2ImgPipeline +[[autodoc]] LuminaPipeline - all - __call__ diff --git a/docs/source/en/api/pipelines/lumina2.md b/docs/source/en/api/pipelines/lumina2.md index 9134ccf86b79..323cc8b6ea4a 100644 --- a/docs/source/en/api/pipelines/lumina2.md +++ b/docs/source/en/api/pipelines/lumina2.md @@ -32,14 +32,14 @@ Single file loading for Lumina Image 2.0 is available for the `Lumina2Transforme ```python import torch -from diffusers import Lumina2Transformer2DModel, Lumina2Text2ImgPipeline +from diffusers import Lumina2Transformer2DModel, Lumina2Pipeline ckpt_path = "https://huggingface.co/Alpha-VLLM/Lumina-Image-2.0/blob/main/consolidated.00-of-01.pth" transformer = Lumina2Transformer2DModel.from_single_file( ckpt_path, torch_dtype=torch.bfloat16 ) -pipe = Lumina2Text2ImgPipeline.from_pretrained( +pipe = Lumina2Pipeline.from_pretrained( "Alpha-VLLM/Lumina-Image-2.0", transformer=transformer, torch_dtype=torch.bfloat16 ) pipe.enable_model_cpu_offload() @@ -56,7 +56,7 @@ image.save("lumina-single-file.png") GGUF Quantized checkpoints for the `Lumina2Transformer2DModel` can be loaded via `from_single_file` with the `GGUFQuantizationConfig` ```python -from diffusers import Lumina2Transformer2DModel, Lumina2Text2ImgPipeline, GGUFQuantizationConfig +from diffusers import Lumina2Transformer2DModel, Lumina2Pipeline, GGUFQuantizationConfig ckpt_path = "https://huggingface.co/calcuis/lumina-gguf/blob/main/lumina2-q4_0.gguf" transformer = Lumina2Transformer2DModel.from_single_file( @@ -65,7 +65,7 @@ transformer = Lumina2Transformer2DModel.from_single_file( torch_dtype=torch.bfloat16, ) -pipe = Lumina2Text2ImgPipeline.from_pretrained( +pipe = Lumina2Pipeline.from_pretrained( "Alpha-VLLM/Lumina-Image-2.0", transformer=transformer, torch_dtype=torch.bfloat16 ) pipe.enable_model_cpu_offload() @@ -76,8 +76,8 @@ image = pipe( image.save("lumina-gguf.png") ``` -## Lumina2Text2ImgPipeline +## Lumina2Pipeline -[[autodoc]] Lumina2Text2ImgPipeline +[[autodoc]] Lumina2Pipeline - all - __call__ diff --git a/scripts/convert_lumina_to_diffusers.py b/scripts/convert_lumina_to_diffusers.py index a12625d1376f..c14aad3c6bf2 100644 --- a/scripts/convert_lumina_to_diffusers.py +++ b/scripts/convert_lumina_to_diffusers.py @@ -5,7 +5,7 @@ from safetensors.torch import load_file from transformers import AutoModel, AutoTokenizer -from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaText2ImgPipeline +from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaPipeline def main(args): @@ -115,7 +115,7 @@ def main(args): tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b") text_encoder = 
AutoModel.from_pretrained("google/gemma-2b") - pipeline = LuminaText2ImgPipeline( + pipeline = LuminaPipeline( tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, vae=vae, scheduler=scheduler ) pipeline.save_pretrained(args.dump_path) diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index a9e7c823db41..9c7ea6b20f1a 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -341,7 +341,9 @@ "LEditsPPPipelineStableDiffusionXL", "LTXImageToVideoPipeline", "LTXPipeline", + "Lumina2Pipeline", "Lumina2Text2ImgPipeline", + "LuminaPipeline", "LuminaText2ImgPipeline", "MarigoldDepthPipeline", "MarigoldNormalsPipeline", @@ -840,7 +842,9 @@ LEditsPPPipelineStableDiffusionXL, LTXImageToVideoPipeline, LTXPipeline, + Lumina2Pipeline, Lumina2Text2ImgPipeline, + LuminaPipeline, LuminaText2ImgPipeline, MarigoldDepthPipeline, MarigoldNormalsPipeline, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 49041086f535..ed545fcedf47 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -256,8 +256,8 @@ ) _import_structure["latte"] = ["LattePipeline"] _import_structure["ltx"] = ["LTXPipeline", "LTXImageToVideoPipeline"] - _import_structure["lumina"] = ["LuminaText2ImgPipeline"] - _import_structure["lumina2"] = ["Lumina2Text2ImgPipeline"] + _import_structure["lumina"] = ["LuminaPipeline", "LuminaText2ImgPipeline"] + _import_structure["lumina2"] = ["Lumina2Pipeline", "Lumina2Text2ImgPipeline"] _import_structure["marigold"].extend( [ "MarigoldDepthPipeline", @@ -599,8 +599,8 @@ LEditsPPPipelineStableDiffusionXL, ) from .ltx import LTXImageToVideoPipeline, LTXPipeline - from .lumina import LuminaText2ImgPipeline - from .lumina2 import Lumina2Text2ImgPipeline + from .lumina import LuminaPipeline, LuminaText2ImgPipeline + from .lumina2 import Lumina2Pipeline, Lumina2Text2ImgPipeline from .marigold import ( MarigoldDepthPipeline, MarigoldNormalsPipeline, diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py index 1c38f83a7ef3..98e071d2865b 100644 --- a/src/diffusers/pipelines/auto_pipeline.py +++ b/src/diffusers/pipelines/auto_pipeline.py @@ -65,8 +65,8 @@ ) from .kandinsky3 import Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline from .latent_consistency_models import LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline -from .lumina import LuminaText2ImgPipeline -from .lumina2 import Lumina2Text2ImgPipeline +from .lumina import LuminaPipeline +from .lumina2 import Lumina2Pipeline from .pag import ( HunyuanDiTPAGPipeline, PixArtSigmaPAGPipeline, @@ -136,8 +136,8 @@ ("flux", FluxPipeline), ("flux-control", FluxControlPipeline), ("flux-controlnet", FluxControlNetPipeline), - ("lumina", LuminaText2ImgPipeline), - ("lumina2", Lumina2Text2ImgPipeline), + ("lumina", LuminaPipeline), + ("lumina2", Lumina2Pipeline), ("cogview3", CogView3PlusPipeline), ("cogview4", CogView4Pipeline), ] diff --git a/src/diffusers/pipelines/lumina/__init__.py b/src/diffusers/pipelines/lumina/__init__.py index ca1396359721..a19dc7e94641 100644 --- a/src/diffusers/pipelines/lumina/__init__.py +++ b/src/diffusers/pipelines/lumina/__init__.py @@ -22,7 +22,7 @@ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: - _import_structure["pipeline_lumina"] = ["LuminaText2ImgPipeline"] + _import_structure["pipeline_lumina"] = ["LuminaPipeline", "LuminaText2ImgPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: @@ -32,7 +32,7 
@@ except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: - from .pipeline_lumina import LuminaText2ImgPipeline + from .pipeline_lumina import LuminaPipeline, LuminaText2ImgPipeline else: import sys diff --git a/src/diffusers/pipelines/lumina/pipeline_lumina.py b/src/diffusers/pipelines/lumina/pipeline_lumina.py index 4f6793e17b37..21fe5644efa2 100644 --- a/src/diffusers/pipelines/lumina/pipeline_lumina.py +++ b/src/diffusers/pipelines/lumina/pipeline_lumina.py @@ -30,6 +30,7 @@ from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( BACKENDS_MAPPING, + deprecate, is_bs4_available, is_ftfy_available, is_torch_xla_available, @@ -60,9 +61,9 @@ Examples: ```py >>> import torch - >>> from diffusers import LuminaText2ImgPipeline + >>> from diffusers import LuminaPipeline - >>> pipe = LuminaText2ImgPipeline.from_pretrained( + >>> pipe = LuminaPipeline.from_pretrained( ... "Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16 ... ) >>> # Enable memory optimizations. @@ -134,7 +135,7 @@ def retrieve_timesteps( return timesteps, num_inference_steps -class LuminaText2ImgPipeline(DiffusionPipeline): +class LuminaPipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using Lumina-T2I. @@ -935,3 +936,10 @@ def __call__( return (image,) return ImagePipelineOutput(images=image) + + +class LuminaText2ImgPipeline(LuminaPipeline): + def __init__(self, *args, **kwargs): + deprecation_message = "`LuminaText2ImgPipeline` has been renamed to `LuminaPipeline` and will be removed in a future version. Please use `LuminaPipeline` instead." + deprecate("diffusers.pipelines.lumina.pipeline_lumina.LuminaText2ImgPipeline", "0.34", deprecation_message) + super().__init__(*args, **kwargs) diff --git a/src/diffusers/pipelines/lumina2/__init__.py b/src/diffusers/pipelines/lumina2/__init__.py index 0e51a768a785..b1d6bfeb0d58 100644 --- a/src/diffusers/pipelines/lumina2/__init__.py +++ b/src/diffusers/pipelines/lumina2/__init__.py @@ -22,7 +22,7 @@ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: - _import_structure["pipeline_lumina2"] = ["Lumina2Text2ImgPipeline"] + _import_structure["pipeline_lumina2"] = ["Lumina2Pipeline", "Lumina2Text2ImgPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: @@ -32,7 +32,7 @@ except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: - from .pipeline_lumina2 import Lumina2Text2ImgPipeline + from .pipeline_lumina2 import Lumina2Pipeline, Lumina2Text2ImgPipeline else: import sys diff --git a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py index cc594c50cb49..37217dda0dde 100644 --- a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py +++ b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py @@ -24,6 +24,7 @@ from ...models.transformers.transformer_lumina2 import Lumina2Transformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -46,9 +47,9 @@ Examples: ```py >>> import torch - >>> from diffusers import Lumina2Text2ImgPipeline + >>> from diffusers import Lumina2Pipeline - >>> pipe = Lumina2Text2ImgPipeline.from_pretrained("Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16) + >>> pipe = Lumina2Pipeline.from_pretrained("Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16) >>> # Enable memory optimizations. 
>>> pipe.enable_model_cpu_offload() @@ -132,7 +133,7 @@ def retrieve_timesteps( return timesteps, num_inference_steps -class Lumina2Text2ImgPipeline(DiffusionPipeline): +class Lumina2Pipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using Lumina-T2I. @@ -757,3 +758,10 @@ def __call__( return (image,) return ImagePipelineOutput(images=image) + + +class Lumina2Text2ImgPipeline(Lumina2Pipeline): + def __init__(self, *args, **kwargs): + deprecation_message = "`Lumina2Text2ImgPipeline` has been renamed to `Lumina2Pipeline` and will be removed in a future version. Please use `Lumina2Pipeline` instead." + deprecate("diffusers.pipelines.lumina2.pipeline_lumina2.Lumina2Text2ImgPipeline", "0.34", deprecation_message) + super().__init__(*args, **kwargs) diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index c853cf8faa55..30702713013e 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -1172,6 +1172,21 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) +class Lumina2Pipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class LuminaText2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] @@ -1187,6 +1202,21 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) +class LuminaPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class MarigoldDepthPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] diff --git a/tests/pipelines/lumina/test_lumina_nextdit.py b/tests/pipelines/lumina/test_lumina_nextdit.py index 18dcdef98d7d..fc9db4c0d582 100644 --- a/tests/pipelines/lumina/test_lumina_nextdit.py +++ b/tests/pipelines/lumina/test_lumina_nextdit.py @@ -5,7 +5,7 @@ import torch from transformers import AutoTokenizer, GemmaConfig, GemmaForCausalLM -from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaText2ImgPipeline +from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaPipeline from diffusers.utils.testing_utils import ( numpy_cosine_similarity_distance, require_torch_gpu, @@ -16,8 +16,8 @@ from ..test_pipelines_common import PipelineTesterMixin -class LuminaText2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterMixin): - pipeline_class = LuminaText2ImgPipeline +class LuminaPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = LuminaPipeline params = frozenset( [ "prompt", @@ -130,8 +130,8 @@ def test_xformers_attention_forwardGenerator_pass(self): @slow @require_torch_gpu -class LuminaText2ImgPipelineSlowTests(unittest.TestCase): - pipeline_class = LuminaText2ImgPipeline +class 
LuminaPipelineSlowTests(unittest.TestCase): + pipeline_class = LuminaPipeline repo_id = "Alpha-VLLM/Lumina-Next-SFT-diffusers" def setUp(self): diff --git a/tests/pipelines/lumina2/test_pipeline_lumina2.py b/tests/pipelines/lumina2/test_pipeline_lumina2.py index f8e0667ce1d2..b21d42807e73 100644 --- a/tests/pipelines/lumina2/test_pipeline_lumina2.py +++ b/tests/pipelines/lumina2/test_pipeline_lumina2.py @@ -7,7 +7,7 @@ from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, - Lumina2Text2ImgPipeline, + Lumina2Pipeline, Lumina2Transformer2DModel, ) from diffusers.utils.testing_utils import torch_device @@ -15,8 +15,8 @@ from ..test_pipelines_common import PipelineTesterMixin -class Lumina2Text2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterMixin): - pipeline_class = Lumina2Text2ImgPipeline +class Lumina2PipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = Lumina2Pipeline params = frozenset( [ "prompt", From 58a34a01e44f23396e7db7b072001d15567057e2 Mon Sep 17 00:00:00 2001 From: hlky Date: Wed, 19 Feb 2025 08:25:51 +0000 Subject: [PATCH 2/8] make style --- src/diffusers/pipelines/lumina/pipeline_lumina.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/diffusers/pipelines/lumina/pipeline_lumina.py b/src/diffusers/pipelines/lumina/pipeline_lumina.py index 21fe5644efa2..f7a5ccc19a92 100644 --- a/src/diffusers/pipelines/lumina/pipeline_lumina.py +++ b/src/diffusers/pipelines/lumina/pipeline_lumina.py @@ -63,9 +63,7 @@ >>> import torch >>> from diffusers import LuminaPipeline - >>> pipe = LuminaPipeline.from_pretrained( - ... "Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16 - ... ) + >>> pipe = LuminaPipeline.from_pretrained("Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16) >>> # Enable memory optimizations. 
>>> pipe.enable_model_cpu_offload() From f49dd4e854b7f22f40af06e62d19f15738ccf987 Mon Sep 17 00:00:00 2001 From: hlky Date: Wed, 19 Feb 2025 08:37:44 +0000 Subject: [PATCH 3/8] make fix-copies --- .../utils/dummy_torch_and_transformers_objects.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 30702713013e..6c9c55ed614e 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -1157,7 +1157,7 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) -class Lumina2Text2ImgPipeline(metaclass=DummyObject): +class Lumina2Pipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): @@ -1172,7 +1172,7 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) -class Lumina2Pipeline(metaclass=DummyObject): +class Lumina2Text2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): @@ -1187,7 +1187,7 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) -class LuminaText2ImgPipeline(metaclass=DummyObject): +class LuminaPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): @@ -1202,7 +1202,7 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) -class LuminaPipeline(metaclass=DummyObject): +class LuminaText2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): From 1e035c087e554fcb095a1d2d52f97340b28c5143 Mon Sep 17 00:00:00 2001 From: hlky Date: Wed, 19 Feb 2025 08:55:35 +0000 Subject: [PATCH 4/8] deprecation test --- .../pipelines/lumina/pipeline_lumina.py | 17 +++++++++++++++-- .../pipelines/lumina2/pipeline_lumina2.py | 17 +++++++++++++++-- tests/pipelines/lumina/test_lumina_nextdit.py | 14 +++++++++++++- .../pipelines/lumina2/test_pipeline_lumina2.py | 7 +++++++ 4 files changed, 50 insertions(+), 5 deletions(-) diff --git a/src/diffusers/pipelines/lumina/pipeline_lumina.py b/src/diffusers/pipelines/lumina/pipeline_lumina.py index f7a5ccc19a92..937f5acb687e 100644 --- a/src/diffusers/pipelines/lumina/pipeline_lumina.py +++ b/src/diffusers/pipelines/lumina/pipeline_lumina.py @@ -937,7 +937,20 @@ def __call__( class LuminaText2ImgPipeline(LuminaPipeline): - def __init__(self, *args, **kwargs): + def __init__( + self, + transformer: LuminaNextDiT2DModel, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: AutoModel, + tokenizer: AutoTokenizer, + ): deprecation_message = "`LuminaText2ImgPipeline` has been renamed to `LuminaPipeline` and will be removed in a future version. Please use `LuminaPipeline` instead." 
deprecate("diffusers.pipelines.lumina.pipeline_lumina.LuminaText2ImgPipeline", "0.34", deprecation_message) - super().__init__(*args, **kwargs) + super().__init__( + transformer=transformer, + scheduler=scheduler, + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + ) diff --git a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py index 37217dda0dde..fcd5df04b3c5 100644 --- a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py +++ b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py @@ -761,7 +761,20 @@ def __call__( class Lumina2Text2ImgPipeline(Lumina2Pipeline): - def __init__(self, *args, **kwargs): + def __init__( + self, + transformer: Lumina2Transformer2DModel, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: AutoModel, + tokenizer: AutoTokenizer, + ): deprecation_message = "`Lumina2Text2ImgPipeline` has been renamed to `Lumina2Pipeline` and will be removed in a future version. Please use `Lumina2Pipeline` instead." deprecate("diffusers.pipelines.lumina2.pipeline_lumina2.Lumina2Text2ImgPipeline", "0.34", deprecation_message) - super().__init__(*args, **kwargs) + super().__init__( + transformer=transformer, + scheduler=scheduler, + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + ) diff --git a/tests/pipelines/lumina/test_lumina_nextdit.py b/tests/pipelines/lumina/test_lumina_nextdit.py index fc9db4c0d582..031647a3eda9 100644 --- a/tests/pipelines/lumina/test_lumina_nextdit.py +++ b/tests/pipelines/lumina/test_lumina_nextdit.py @@ -5,7 +5,13 @@ import torch from transformers import AutoTokenizer, GemmaConfig, GemmaForCausalLM -from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaPipeline +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + LuminaNextDiT2DModel, + LuminaPipeline, + LuminaText2ImgPipeline, +) from diffusers.utils.testing_utils import ( numpy_cosine_similarity_distance, require_torch_gpu, @@ -127,6 +133,12 @@ def test_lumina_prompt_embeds(self): def test_xformers_attention_forwardGenerator_pass(self): pass + def test_deprecation_raises_warning(self): + with self.assertWarns(FutureWarning) as warning: + _ = LuminaText2ImgPipeline(**self.get_dummy_components()).to(torch_device) + warning_message = str(warning.warnings[0].message) + assert "renamed to `LuminaPipeline`" in warning_message + @slow @require_torch_gpu diff --git a/tests/pipelines/lumina2/test_pipeline_lumina2.py b/tests/pipelines/lumina2/test_pipeline_lumina2.py index b21d42807e73..089c372c0cc8 100644 --- a/tests/pipelines/lumina2/test_pipeline_lumina2.py +++ b/tests/pipelines/lumina2/test_pipeline_lumina2.py @@ -8,6 +8,7 @@ AutoencoderKL, FlowMatchEulerDiscreteScheduler, Lumina2Pipeline, + Lumina2Text2ImgPipeline, Lumina2Transformer2DModel, ) from diffusers.utils.testing_utils import torch_device @@ -145,3 +146,9 @@ def test_lumina_prompt_embeds(self): max_diff = np.abs(output_with_prompt - output_with_embeds).max() assert max_diff < 1e-4 + + def test_deprecation_raises_warning(self): + with self.assertWarns(FutureWarning) as warning: + _ = Lumina2Text2ImgPipeline(**self.get_dummy_components()).to(torch_device) + warning_message = str(warning.warnings[0].message) + assert "renamed to `Lumina2Pipeline`" in warning_message From 646e95e386e344211b09fa6abaaf33648889e83c Mon Sep 17 00:00:00 2001 From: hlky Date: Sat, 22 Feb 2025 18:36:35 +0000 Subject: [PATCH 5/8] Update pipeline_lumina.py --- 
src/diffusers/pipelines/lumina/pipeline_lumina.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/diffusers/pipelines/lumina/pipeline_lumina.py b/src/diffusers/pipelines/lumina/pipeline_lumina.py index ca30b3bb3796..816213f105cb 100644 --- a/src/diffusers/pipelines/lumina/pipeline_lumina.py +++ b/src/diffusers/pipelines/lumina/pipeline_lumina.py @@ -939,8 +939,8 @@ def __init__( transformer: LuminaNextDiT2DModel, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, - text_encoder: AutoModel, - tokenizer: AutoTokenizer, + text_encoder: GemmaPreTrainedModel, + tokenizer: Union[GemmaTokenizer, GemmaTokenizerFast], ): deprecation_message = "`LuminaText2ImgPipeline` has been renamed to `LuminaPipeline` and will be removed in a future version. Please use `LuminaPipeline` instead." deprecate("diffusers.pipelines.lumina.pipeline_lumina.LuminaText2ImgPipeline", "0.34", deprecation_message) From 7a805e4dee131560a3831842e0dc7f4ecdd63213 Mon Sep 17 00:00:00 2001 From: hlky Date: Sat, 22 Feb 2025 18:37:14 +0000 Subject: [PATCH 6/8] Update pipeline_lumina2.py --- src/diffusers/pipelines/lumina2/pipeline_lumina2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py index e487d3ec73ea..e0905a2f131f 100644 --- a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py +++ b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py @@ -776,8 +776,8 @@ def __init__( transformer: Lumina2Transformer2DModel, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, - text_encoder: AutoModel, - tokenizer: AutoTokenizer, + text_encoder: Gemma2PreTrainedModel, + tokenizer: Union[GemmaTokenizer, GemmaTokenizerFast], ): deprecation_message = "`Lumina2Text2ImgPipeline` has been renamed to `Lumina2Pipeline` and will be removed in a future version. Please use `Lumina2Pipeline` instead." 
deprecate("diffusers.pipelines.lumina2.pipeline_lumina2.Lumina2Text2ImgPipeline", "0.34", deprecation_message) From 8e53a9904d47750ced24d5e814344e47a3990235 Mon Sep 17 00:00:00 2001 From: hlky Date: Sat, 22 Feb 2025 18:39:46 +0000 Subject: [PATCH 7/8] Update test_pipeline_lumina2.py --- tests/pipelines/lumina2/test_pipeline_lumina2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pipelines/lumina2/test_pipeline_lumina2.py b/tests/pipelines/lumina2/test_pipeline_lumina2.py index 34f0f4da01f7..1a22f0489171 100644 --- a/tests/pipelines/lumina2/test_pipeline_lumina2.py +++ b/tests/pipelines/lumina2/test_pipeline_lumina2.py @@ -10,7 +10,7 @@ Lumina2Text2ImgPipeline, Lumina2Transformer2DModel, ) - +from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import PipelineTesterMixin From cc86e4e4f2fc0fe28d9919cf689c8d77b5e09bd5 Mon Sep 17 00:00:00 2001 From: hlky Date: Sat, 22 Feb 2025 18:42:26 +0000 Subject: [PATCH 8/8] make --- tests/pipelines/lumina2/test_pipeline_lumina2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pipelines/lumina2/test_pipeline_lumina2.py b/tests/pipelines/lumina2/test_pipeline_lumina2.py index 1a22f0489171..33fc870bcd34 100644 --- a/tests/pipelines/lumina2/test_pipeline_lumina2.py +++ b/tests/pipelines/lumina2/test_pipeline_lumina2.py @@ -11,6 +11,7 @@ Lumina2Transformer2DModel, ) from diffusers.utils.testing_utils import torch_device + from ..test_pipelines_common import PipelineTesterMixin @@ -122,4 +123,3 @@ def test_deprecation_raises_warning(self): _ = Lumina2Text2ImgPipeline(**self.get_dummy_components()).to(torch_device) warning_message = str(warning.warnings[0].message) assert "renamed to `Lumina2Pipeline`" in warning_message -
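Taken together, the series renames the public classes while keeping the old names importable: `LuminaText2ImgPipeline` and `Lumina2Text2ImgPipeline` survive as thin subclasses whose `__init__` calls `deprecate(...)` before delegating to `LuminaPipeline` / `Lumina2Pipeline`, and the later patches give those shims explicit, typed component signatures rather than an opaque `*args, **kwargs` passthrough. Below is a minimal sketch of how the rename surfaces to users; it assumes a diffusers build that includes this series and access to the `Alpha-VLLM/Lumina-Next-SFT-diffusers` checkpoint referenced in the docs above. The warning text asserted here is the one added by the deprecation tests in this series.

```python
import warnings

import torch
from diffusers import LuminaPipeline, LuminaText2ImgPipeline

# New, preferred entry point after the rename.
pipe = LuminaPipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16
)

# The old class still builds a working pipeline, but its __init__ now
# routes through `deprecate(...)`, which emits a FutureWarning.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _ = LuminaText2ImgPipeline(**pipe.components)

assert any("renamed to `LuminaPipeline`" in str(w.message) for w in caught)
```

The same pattern applies to `Lumina2Text2ImgPipeline` / `Lumina2Pipeline`. Because the old names remain exported from `diffusers` and subclass the new pipelines, existing imports and checkpoints whose `model_index.json` still records the old class name continue to load, with the warning, until the aliases are removed in 0.34.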