From 7e20a8b3ff1394d082a684b159348394aa575b5b Mon Sep 17 00:00:00 2001 From: Chong Date: Mon, 21 Aug 2023 22:16:21 +0800 Subject: [PATCH 01/36] T2I-Adapter-XL --- src/diffusers/models/adapter.py | 76 +++++++++++++++++++++++ src/diffusers/models/unet_2d_condition.py | 3 + 2 files changed, 79 insertions(+) diff --git a/src/diffusers/models/adapter.py b/src/diffusers/models/adapter.py index b9ffc64d912f..e97ba2133b25 100644 --- a/src/diffusers/models/adapter.py +++ b/src/diffusers/models/adapter.py @@ -128,6 +128,8 @@ def __init__( if adapter_type == "full_adapter": self.adapter = FullAdapter(in_channels, channels, num_res_blocks, downscale_factor) + elif adapter_type == "full_adapter_xl": + self.adapter = FullAdapter_XL(in_channels, channels, num_res_blocks, downscale_factor) elif adapter_type == "light_adapter": self.adapter = LightAdapter(in_channels, channels, num_res_blocks, downscale_factor) else: @@ -184,6 +186,80 @@ def forward(self, x: torch.Tensor) -> List[torch.Tensor]: return features +class FullAdapter_XL(nn.Module): + def __init__( + self, + in_channels: int = 3, + channels: List[int] = [320, 640, 1280, 1280], + num_res_blocks: int = 2, + downscale_factor: int = 8, + ): + super().__init__() + + in_channels = in_channels * downscale_factor**2 + self.channels = channels + self.num_res_blocks = num_res_blocks + + self.unshuffle = nn.PixelUnshuffle(downscale_factor) + self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1) + self.body = [] + for i in range(len(channels)): + for j in range(num_res_blocks): + if (i == 2) and (j == 0): + self.body.append( + AdapterResnetBlock_XL(channels[i - 1], channels[i], down=True)) + elif (i == 1) and (j == 0): + self.body.append( + AdapterResnetBlock_XL(channels[i - 1], channels[i], down=False)) + else: + self.body.append( + AdapterResnetBlock_XL(channels[i], channels[i], down=False)) + self.body = nn.ModuleList(self.body) + + self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 1) + 
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]: + x = self.unshuffle(x) + x = self.conv_in(x) + + features = [] + + for i in range(len(self.channels)): + for j in range(self.num_res_blocks): + idx = i * self.num_res_blocks + j + x = self.body[idx](x) + features.append(x) + + return features + + +class AdapterResnetBlock_XL(nn.Module): + def __init__(self, channels_in, channels_out, down=False): + super().__init__() + if channels_in != channels_out: + self.in_conv = nn.Conv2d(channels_in, channels_out, 1) + else: + self.in_conv = None + self.block1 = nn.Conv2d(channels_out, channels_out, kernel_size=3, padding=1) + self.act = nn.ReLU() + self.block2 = nn.Conv2d(channels_out, channels_out, kernel_size=1) + self.downsample = None + if down: + self.downsample = Downsample2D(channels_in) + + def forward(self, x): + if self.downsample is not None: + x = self.downsample(x) + if self.in_conv is not None: + x = self.in_conv(x) + h = x + h = self.block1(h) + h = self.act(h) + h = self.block2(h) + + return h + x + + class AdapterBlock(nn.Module): def __init__(self, in_channels, out_channels, num_res_blocks, down=False): super().__init__() diff --git a/src/diffusers/models/unet_2d_condition.py b/src/diffusers/models/unet_2d_condition.py index c40567e57702..89115284a7be 100644 --- a/src/diffusers/models/unet_2d_condition.py +++ b/src/diffusers/models/unet_2d_condition.py @@ -965,6 +965,9 @@ def forward( cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) + # To support T2I-Adapter-XL + if is_adapter and len(down_block_additional_residuals) > 0: + sample += down_block_additional_residuals.pop(0) if is_controlnet: sample = sample + mid_block_additional_residual From a45cc6a201ec8025d28d1599357ab9c6c2fc5a6f Mon Sep 17 00:00:00 2001 From: Chong Date: Tue, 22 Aug 2023 11:32:06 +0800 Subject: [PATCH 02/36] update --- src/diffusers/models/adapter.py | 60 ++++++++------------------------- 1 file changed, 14 insertions(+), 46 
deletions(-) diff --git a/src/diffusers/models/adapter.py b/src/diffusers/models/adapter.py index e97ba2133b25..9805a0917da2 100644 --- a/src/diffusers/models/adapter.py +++ b/src/diffusers/models/adapter.py @@ -192,31 +192,28 @@ def __init__( in_channels: int = 3, channels: List[int] = [320, 640, 1280, 1280], num_res_blocks: int = 2, - downscale_factor: int = 8, + downscale_factor: int = 16, ): super().__init__() in_channels = in_channels * downscale_factor**2 - self.channels = channels - self.num_res_blocks = num_res_blocks self.unshuffle = nn.PixelUnshuffle(downscale_factor) self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1) + self.body = [] + # blocks to extract XL features with dimensions of [320, 64, 64], [640, 64, 64], [1280, 32, 32], [1280, 32, 32] for i in range(len(channels)): - for j in range(num_res_blocks): - if (i == 2) and (j == 0): - self.body.append( - AdapterResnetBlock_XL(channels[i - 1], channels[i], down=True)) - elif (i == 1) and (j == 0): - self.body.append( - AdapterResnetBlock_XL(channels[i - 1], channels[i], down=False)) - else: - self.body.append( - AdapterResnetBlock_XL(channels[i], channels[i], down=False)) - self.body = nn.ModuleList(self.body) + if i==1: + self.body.append(AdapterBlock(channels[i-1], channels[i], num_res_blocks)) + elif i==2: + self.body.append(AdapterBlock(channels[i-1], channels[i], num_res_blocks, down=True)) + else: + self.body.append(AdapterBlock(channels[i], channels[i], num_res_blocks)) - self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 1) + self.body = nn.ModuleList(self.body) + # XL has one fewer downsampling + self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 2) def forward(self, x: torch.Tensor) -> List[torch.Tensor]: x = self.unshuffle(x) @@ -224,42 +221,13 @@ def forward(self, x: torch.Tensor) -> List[torch.Tensor]: features = [] - for i in range(len(self.channels)): - for j in range(self.num_res_blocks): - idx = i * self.num_res_blocks 
+ j - x = self.body[idx](x) + for block in self.body: + x = block(x) features.append(x) return features -class AdapterResnetBlock_XL(nn.Module): - def __init__(self, channels_in, channels_out, down=False): - super().__init__() - if channels_in != channels_out: - self.in_conv = nn.Conv2d(channels_in, channels_out, 1) - else: - self.in_conv = None - self.block1 = nn.Conv2d(channels_out, channels_out, kernel_size=3, padding=1) - self.act = nn.ReLU() - self.block2 = nn.Conv2d(channels_out, channels_out, kernel_size=1) - self.downsample = None - if down: - self.downsample = Downsample2D(channels_in) - - def forward(self, x): - if self.downsample is not None: - x = self.downsample(x) - if self.in_conv is not None: - x = self.in_conv(x) - h = x - h = self.block1(h) - h = self.act(h) - h = self.block2(h) - - return h + x - - class AdapterBlock(nn.Module): def __init__(self, in_channels, out_channels, num_res_blocks, down=False): super().__init__() From cd829c9e32a2472f16e5d6c1e3b9419019cd9d82 Mon Sep 17 00:00:00 2001 From: Chong Date: Tue, 22 Aug 2023 11:50:41 +0800 Subject: [PATCH 03/36] update --- src/diffusers/models/adapter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/diffusers/models/adapter.py b/src/diffusers/models/adapter.py index 9805a0917da2..0e01046daed8 100644 --- a/src/diffusers/models/adapter.py +++ b/src/diffusers/models/adapter.py @@ -129,7 +129,7 @@ def __init__( if adapter_type == "full_adapter": self.adapter = FullAdapter(in_channels, channels, num_res_blocks, downscale_factor) elif adapter_type == "full_adapter_xl": - self.adapter = FullAdapter_XL(in_channels, channels, num_res_blocks, downscale_factor) + self.adapter = FullAdapterXL(in_channels, channels, num_res_blocks, downscale_factor) elif adapter_type == "light_adapter": self.adapter = LightAdapter(in_channels, channels, num_res_blocks, downscale_factor) else: @@ -186,7 +186,7 @@ def forward(self, x: torch.Tensor) -> List[torch.Tensor]: return features -class 
FullAdapter_XL(nn.Module): +class FullAdapterXL(nn.Module): def __init__( self, in_channels: int = 3, From 44b3454ef36b19f392cb22729780f979ab8cd04c Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 2023 11:45:35 +0800 Subject: [PATCH 04/36] add pipeline --- src/diffusers/__init__.py | 1 + src/diffusers/pipelines/__init__.py | 1 + .../pipelines/t2i_adapter/__init__.py | 1 + .../pipeline_stable_diffusion_xl_adapter.py | 939 ++++++++++++++++++ 4 files changed, 942 insertions(+) create mode 100644 src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index fb77de174a3b..bca89bb81048 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -169,6 +169,7 @@ ShapEImg2ImgPipeline, ShapEPipeline, StableDiffusionAdapterPipeline, + StableDiffusionXLAdapterPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImg2ImgPipeline, StableDiffusionControlNetInpaintPipeline, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 33893a350b3d..b3d8b765cee2 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -119,6 +119,7 @@ StableDiffusionXLPipeline, ) from .t2i_adapter import StableDiffusionAdapterPipeline + from .t2i_adapter import StableDiffusionAdapterPipeline from .text_to_video_synthesis import TextToVideoSDPipeline, TextToVideoZeroPipeline, VideoToVideoSDPipeline from .unclip import UnCLIPImageVariationPipeline, UnCLIPPipeline from .unidiffuser import ImageTextPipelineOutput, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder diff --git a/src/diffusers/pipelines/t2i_adapter/__init__.py b/src/diffusers/pipelines/t2i_adapter/__init__.py index c4de661dbefa..a9a81df36a1a 100644 --- a/src/diffusers/pipelines/t2i_adapter/__init__.py +++ b/src/diffusers/pipelines/t2i_adapter/__init__.py @@ -12,3 +12,4 @@ from ...utils.dummy_torch_and_transformers_objects import * # 
noqa F403 else: from .pipeline_stable_diffusion_adapter import StableDiffusionAdapterPipeline + from .pipeline_stable_diffusion_xl_adapter import StableDiffusionXLAdapterPipeline diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py new file mode 100644 index 000000000000..15ad70302984 --- /dev/null +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -0,0 +1,939 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from dataclasses import dataclass + +import torch +from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer +import PIL +import numpy as np + +from ...image_processor import VaeImageProcessor +from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, MultiAdapter, T2IAdapter, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + PIL_INTERPOLATION, + BaseOutput, + is_accelerate_available, + is_accelerate_version, + logging, + randn_tensor, + replace_example_docstring, +) +from ..pipeline_utils import DiffusionPipeline +from basicsr.utils import tensor2img + +@dataclass +class StableDiffusionAdapterXLPipelineOutput(BaseOutput): + """ + Output class for Stable Diffusion pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionXLPipeline + + >>> pipe = StableDiffusionXLPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ... 
) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt).images[0] + ``` +""" + +def _preprocess_adapter_image(image, height, width): + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image] + image = [ + i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image + ] # expand [h, w] or [h, w, c] to [b, h, w, c] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + if image[0].ndim == 3: + image = torch.stack(image, dim=0) + elif image[0].ndim == 4: + image = torch.cat(image, dim=0) + else: + raise ValueError( + f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}" + ) + return image + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +class StableDiffusionXLAdapterPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin): + r""" + Pipeline for text-to-image generation using Stable Diffusion augmented with T2I-Adapter + https://arxiv.org/abs/2302.08453 + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`): + Provides additional conditioning to the unet during the denoising process. If you set multiple Adapter as a + list, the outputs from each Adapter are added together to create one combined additional conditioning. + adapter_weights (`List[float]`, *optional*, defaults to None): + List of floats representing the weight which will be multiply to each adapter's output before adding them + together. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. 
+ tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPFeatureExtractor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]], + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + adapter=adapter, + scheduler=scheduler, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.default_sample_size = self.unet.config.sample_size + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. 
When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling + def enable_vae_tiling(self): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.vae.enable_tiling() + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling + def disable_vae_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_tiling() + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. 
+ """ + if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): + from accelerate import cpu_offload_with_hook + else: + raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") + + device = torch.device(f"cuda:{gpu_id}") + + if self.device.type != "cpu": + self.to("cpu", silence_dtype_warnings=True) + torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + model_sequence = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + model_sequence.extend([self.unet, self.vae, self.adapter]) + + hook = None + for cpu_offloaded_model in model_sequence: + _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) + + # We'll offload the last model manually. + self.final_offload_hook = hook + + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + # textual inversion: procecss multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder( + text_input_ids.to(device), + output_hidden_states=True, + ) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + 
prompt_embeds = prompt_embeds.hidden_states[-2] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt, negative_prompt_2] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + 
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. 
Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. 
The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + LoRAXFormersAttnProcessor, + LoRAAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + def _default_height_width(self, height, width, image): + # NOTE: It is possible that a list of images have different + # dimensions for each image, so just checking the first image + # is not _exactly_ correct, but it is simple. 
+ while isinstance(image, list): + image = image[0] + + if height is None: + if isinstance(image, PIL.Image.Image): + height = image.height + elif isinstance(image, torch.Tensor): + height = image.shape[-2] + + # round down to nearest multiple of `self.adapter.total_downscale_factor` + height = (height // self.adapter.total_downscale_factor) * self.adapter.total_downscale_factor + + if width is None: + if isinstance(image, PIL.Image.Image): + width = image.width + elif isinstance(image, torch.Tensor): + width = image.shape[-1] + + # round down to nearest multiple of `self.adapter.total_downscale_factor` + width = (width // self.adapter.total_downscale_factor) * self.adapter.total_downscale_factor + + return height, width + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: Union[torch.Tensor, PIL.Image.Image, List[PIL.Image.Image]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: 
Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + adapter_conditioning_scale: Union[float, List[float]] = 1.0, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`): + The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the + type is specified as `Torch.FloatTensor`, it is passed to Adapter as is. PIL.Image.Image` can also be + accepted as an image. The control image is automatically resized to fit the output image. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. 
The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionAdapterPipelineOutput`] instead + of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. 
If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of + [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). + Guidance rescale factor should fix overexposure when using zero terminal SNR. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(width, height)`. 
Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the + residual in the original unet. If multiple adapters are specified in init, you can set the + corresponding scale as a list. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + # 0. Default height and width to unet + # height = height or self.default_sample_size * self.vae_scale_factor + # width = width or self.default_sample_size * self.vae_scale_factor + + height, width = self._default_height_width(height, width, image) + device = self._execution_device + + adapter_input = _preprocess_adapter_image(image, height, width).to(device) + + original_size = (height, width) #original_size or (height, width) + target_size = (height, width) #target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + # text_encoder_lora_scale = ( + # cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + # ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + # lora_scale=text_encoder_lora_scale, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Prepare added time ids & embeddings & adapter features + adapter_input = adapter_input.type(latents.dtype) + adapter_state = self.adapter(adapter_input) + for k, v in enumerate(adapter_state): + adapter_state[k] = v * adapter_conditioning_scale + if num_images_per_prompt > 1: + for k, v in enumerate(adapter_state): + adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1) + if do_classifier_free_guidance: + for k, v in enumerate(adapter_state): + adapter_state[k] = torch.cat([v] * 2, dim=0) + + add_text_embeds = pooled_prompt_embeds + add_time_ids = self._get_add_time_ids( + original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 8. 
Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 7.1 Apply denoising_end + if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1: + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + down_block_additional_residuals=[state.clone() for state in adapter_state], + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if do_classifier_free_guidance and guidance_rescale > 0.0: + # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + return StableDiffusionAdapterXLPipelineOutput(images=image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload last model to CPU + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + + if not return_dict: + return (image,) + + return StableDiffusionAdapterXLPipelineOutput(images=image) From d288b3b7e1c7fd091f5e4cddee1d80f7d9d274dc Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 2023 13:25:15 +0800 Subject: [PATCH 05/36] modify pipeline --- .../pipeline_stable_diffusion_xl_adapter.py | 38 ++++++------------- 1 file changed, 11 insertions(+), 27 deletions(-) diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py index 15ad70302984..b7bea3833767 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -1,4 +1,4 @@ -# 
Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2023 TencentARC and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,9 +13,7 @@ # limitations under the License. import inspect -import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union -from dataclasses import dataclass import torch from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer @@ -42,20 +40,7 @@ replace_example_docstring, ) from ..pipeline_utils import DiffusionPipeline -from basicsr.utils import tensor2img - -@dataclass -class StableDiffusionAdapterXLPipelineOutput(BaseOutput): - """ - Output class for Stable Diffusion pipelines. - - Args: - images (`List[PIL.Image.Image]` or `np.ndarray`) - List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, - num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. - """ - - images: Union[List[PIL.Image.Image], np.ndarray] +from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name @@ -213,6 +198,7 @@ def disable_vae_tiling(self): """ self.vae.disable_tiling() + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.enable_model_cpu_offload def enable_model_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared @@ -243,6 +229,7 @@ def enable_model_cpu_offload(self, gpu_id=0): # We'll offload the last model manually. 
self.final_offload_hook = hook + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.encode_prompt def encode_prompt( self, prompt: str, @@ -452,6 +439,7 @@ def prepare_extra_step_kwargs(self, generator, eta): extra_step_kwargs["generator"] = generator return extra_step_kwargs + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.check_inputs def check_inputs( self, prompt, @@ -543,6 +531,7 @@ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype latents = latents * self.scheduler.init_noise_sigma return latents + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl._get_add_time_ids def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype): add_time_ids = list(original_size + crops_coords_top_left + target_size) @@ -579,6 +568,7 @@ def upcast_vae(self): self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) + # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter._default_height_width def _default_height_width(self, height, width, image): # NOTE: It is possible that a list of images have different # dimensions for each image, so just checking the first image @@ -753,16 +743,14 @@ def __call__( `tuple`. When returning a tuple, the first element is a list with the generated images. """ # 0. Default height and width to unet - # height = height or self.default_sample_size * self.vae_scale_factor - # width = width or self.default_sample_size * self.vae_scale_factor height, width = self._default_height_width(height, width, image) device = self._execution_device adapter_input = _preprocess_adapter_image(image, height, width).to(device) - original_size = (height, width) #original_size or (height, width) - target_size = (height, width) #target_size or (height, width) + original_size = (height, width) + target_size = (height, width) # 1. Check inputs. 
Raise error if not correct self.check_inputs( @@ -795,9 +783,6 @@ def __call__( do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt - # text_encoder_lora_scale = ( - # cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None - # ) ( prompt_embeds, negative_prompt_embeds, @@ -815,7 +800,6 @@ def __call__( negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, - # lora_scale=text_encoder_lora_scale, ) # 4. Prepare timesteps @@ -925,7 +909,7 @@ def __call__( image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents - return StableDiffusionAdapterXLPipelineOutput(images=image) + return StableDiffusionXLPipelineOutput(images=image) image = self.image_processor.postprocess(image, output_type=output_type) @@ -936,4 +920,4 @@ def __call__( if not return_dict: return (image,) - return StableDiffusionAdapterXLPipelineOutput(images=image) + return StableDiffusionXLPipelineOutput(images=image) From c8a20effb4d5e4c009b68e1ef089fae70d028b15 Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 2023 13:30:51 +0800 Subject: [PATCH 06/36] modify pipeline --- .../t2i_adapter/pipeline_stable_diffusion_xl_adapter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py index b7bea3833767..66599ec5b2bc 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -749,8 +749,8 @@ def __call__( adapter_input = _preprocess_adapter_image(image, height, width).to(device) - original_size = (height, width) - target_size = (height, width) + original_size = original_size or (height, width) + target_size = 
target_size or (height, width) # 1. Check inputs. Raise error if not correct self.check_inputs( From 3dc1eba65163012422ff2e99f1649ef758a87e55 Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 2023 13:59:47 +0800 Subject: [PATCH 07/36] modify pipeline --- .../pipeline_stable_diffusion_xl_adapter.py | 26 +++++++++++++------ 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py index 66599ec5b2bc..e394e5501a28 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -48,15 +48,26 @@ Examples: ```py >>> import torch - >>> from diffusers import StableDiffusionXLPipeline + >>> from PIL import Image + >>> from diffusers import T2IAdapter, StableDiffusionXLAdapterPipeline, DDPMScheduler + >>> from pytorch_lightning import seed_everything + >>> from diffusers.models.unet_2d_condition import UNet2DConditionModel + >>> from diffusers.utils import load_image - >>> pipe = StableDiffusionXLPipeline.from_pretrained( - ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 - ... 
) - >>> pipe = pipe.to("cuda") + >>> sketch_image = load_image('https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch.png').convert('L') - >>> prompt = "a photo of an astronaut riding a horse on mars" - >>> image = pipe(prompt).images[0] + >>> model_id = 'stabilityai/stable-diffusion-xl-base-1.0' + + >>> adapter = T2IAdapter.from_pretrained("Adapter/t2iadapter", subfolder='sketch_sdxl_1.0',torch_dtype=torch.float16, adapter_type="full_adapter_xl") + >>> scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler") + + >>> pipe = StableDiffusionXLAdapterPipeline.from_pretrained( + model_id, adapter=adapter, safety_checker=None, torch_dtype=torch.float16, variant="fp16", scheduler=scheduler + ) + + >>> pipe.to('cuda') + >>> generator = torch.Generator().manual_seed(42) + >>> sketch_image_out = pipe(prompt='a photo of a dog in real world, high quality', negative_prompt='extra digit, fewer digits, cropped, worst quality, low quality', image=sketch_image, generator=generator, guidance_scale=7.5).images[0] ``` """ @@ -198,7 +209,6 @@ def disable_vae_tiling(self): """ self.vae.disable_tiling() - # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.enable_model_cpu_offload def enable_model_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. 
Compared From e0f73a73e3c813625485d2087d57791956500144 Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 2023 14:18:26 +0800 Subject: [PATCH 08/36] modify pipeline --- .../t2i_adapter/pipeline_stable_diffusion_xl_adapter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py index e394e5501a28..1b98acc33fbe 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -239,7 +239,7 @@ def enable_model_cpu_offload(self, gpu_id=0): # We'll offload the last model manually. self.final_offload_hook = hook - # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.encode_prompt + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt def encode_prompt( self, prompt: str, @@ -449,7 +449,7 @@ def prepare_extra_step_kwargs(self, generator, eta): extra_step_kwargs["generator"] = generator return extra_step_kwargs - # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.check_inputs + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs def check_inputs( self, prompt, @@ -541,7 +541,7 @@ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype latents = latents * self.scheduler.init_noise_sigma return latents - # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl._get_add_time_ids + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype): add_time_ids = list(original_size + crops_coords_top_left 
+ target_size) From b299b8fdaf58adc85038ebd67f421bf048995f41 Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 2023 14:35:28 +0800 Subject: [PATCH 09/36] modify pipeline --- src/diffusers/pipelines/__init__.py | 2 +- .../t2i_adapter/pipeline_stable_diffusion_xl_adapter.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index b3d8b765cee2..47d61d941e44 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -119,7 +119,7 @@ StableDiffusionXLPipeline, ) from .t2i_adapter import StableDiffusionAdapterPipeline - from .t2i_adapter import StableDiffusionAdapterPipeline + from .t2i_adapter import StableDiffusionXLAdapterPipeline from .text_to_video_synthesis import TextToVideoSDPipeline, TextToVideoZeroPipeline, VideoToVideoSDPipeline from .unclip import UnCLIPImageVariationPipeline, UnCLIPPipeline from .unidiffuser import ImageTextPipelineOutput, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py index 1b98acc33fbe..da9eba1fd689 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -62,7 +62,7 @@ >>> scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler") >>> pipe = StableDiffusionXLAdapterPipeline.from_pretrained( - model_id, adapter=adapter, safety_checker=None, torch_dtype=torch.float16, variant="fp16", scheduler=scheduler + model_id, adapter=adapter, torch_dtype=torch.float16, variant="fp16", scheduler=scheduler ) >>> pipe.to('cuda') @@ -578,7 +578,7 @@ def upcast_vae(self): self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) - # Copied from 
diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter._default_height_width + # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width def _default_height_width(self, height, width, image): # NOTE: It is possible that a list of images have different # dimensions for each image, so just checking the first image From 98a6e6946291c3b75af5f111116f79a62e4a3898 Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 2023 15:01:31 +0800 Subject: [PATCH 10/36] modify modeling_text_unet --- .../pipelines/versatile_diffusion/modeling_text_unet.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py b/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py index 087abeb5f341..1d50e156cbbc 100644 --- a/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py +++ b/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py @@ -1137,6 +1137,9 @@ def forward( cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) + # To support T2I-Adapter-XL + if is_adapter and len(down_block_additional_residuals) > 0: + sample += down_block_additional_residuals.pop(0) if is_controlnet: sample = sample + mid_block_additional_residual From d146aa234213a9470fdf8854597077d1194aafce Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 23 Aug 2023 12:39:04 +0530 Subject: [PATCH 11/36] fix styling. 
--- src/diffusers/__init__.py | 2 +- src/diffusers/models/adapter.py | 8 ++-- src/diffusers/models/unet_2d_condition.py | 2 +- src/diffusers/pipelines/__init__.py | 3 +- .../pipeline_stable_diffusion_xl_adapter.py | 48 +++++++++++-------- .../versatile_diffusion/modeling_text_unet.py | 2 +- .../dummy_torch_and_transformers_objects.py | 15 +++++- 7 files changed, 51 insertions(+), 29 deletions(-) diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index bca89bb81048..b4f4e5a2407b 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -169,7 +169,6 @@ ShapEImg2ImgPipeline, ShapEPipeline, StableDiffusionAdapterPipeline, - StableDiffusionXLAdapterPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImg2ImgPipeline, StableDiffusionControlNetInpaintPipeline, @@ -193,6 +192,7 @@ StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableDiffusionXLControlNetImg2ImgPipeline, + StableDiffusionXLAdapterPipeline, StableDiffusionXLControlNetPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, diff --git a/src/diffusers/models/adapter.py b/src/diffusers/models/adapter.py index 0e01046daed8..830b37af4175 100644 --- a/src/diffusers/models/adapter.py +++ b/src/diffusers/models/adapter.py @@ -204,10 +204,10 @@ def __init__( self.body = [] # blocks to extract XL features with dimensions of [320, 64, 64], [640, 64, 64], [1280, 32, 32], [1280, 32, 32] for i in range(len(channels)): - if i==1: - self.body.append(AdapterBlock(channels[i-1], channels[i], num_res_blocks)) - elif i==2: - self.body.append(AdapterBlock(channels[i-1], channels[i], num_res_blocks, down=True)) + if i == 1: + self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks)) + elif i == 2: + self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True)) else: self.body.append(AdapterBlock(channels[i], channels[i], num_res_blocks)) diff --git a/src/diffusers/models/unet_2d_condition.py 
b/src/diffusers/models/unet_2d_condition.py index 89115284a7be..17db42f302ea 100644 --- a/src/diffusers/models/unet_2d_condition.py +++ b/src/diffusers/models/unet_2d_condition.py @@ -965,7 +965,7 @@ def forward( cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) - # To support T2I-Adapter-XL + # To support T2I-Adapter-XL if is_adapter and len(down_block_additional_residuals) > 0: sample += down_block_additional_residuals.pop(0) diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 47d61d941e44..1cca6643be21 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -118,8 +118,7 @@ StableDiffusionXLInstructPix2PixPipeline, StableDiffusionXLPipeline, ) - from .t2i_adapter import StableDiffusionAdapterPipeline - from .t2i_adapter import StableDiffusionXLAdapterPipeline + from .t2i_adapter import StableDiffusionAdapterPipeline, StableDiffusionXLAdapterPipeline from .text_to_video_synthesis import TextToVideoSDPipeline, TextToVideoZeroPipeline, VideoToVideoSDPipeline from .unclip import UnCLIPImageVariationPipeline, UnCLIPPipeline from .unidiffuser import ImageTextPipelineOutput, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py index da9eba1fd689..f1a1e64c206b 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -15,10 +15,12 @@ import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union +import numpy as np +import PIL import torch from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer -import PIL -import numpy as np + +from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput 
from ...image_processor import VaeImageProcessor from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin @@ -32,7 +34,6 @@ from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( PIL_INTERPOLATION, - BaseOutput, is_accelerate_available, is_accelerate_version, logging, @@ -40,7 +41,7 @@ replace_example_docstring, ) from ..pipeline_utils import DiffusionPipeline -from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput + logger = logging.get_logger(__name__) # pylint: disable=invalid-name @@ -48,29 +49,37 @@ Examples: ```py >>> import torch - >>> from PIL import Image >>> from diffusers import T2IAdapter, StableDiffusionXLAdapterPipeline, DDPMScheduler - >>> from pytorch_lightning import seed_everything - >>> from diffusers.models.unet_2d_condition import UNet2DConditionModel >>> from diffusers.utils import load_image - >>> sketch_image = load_image('https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch.png').convert('L') + >>> sketch_image = load_image("https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch.png").convert("L") - >>> model_id = 'stabilityai/stable-diffusion-xl-base-1.0' + >>> model_id = "stabilityai/stable-diffusion-xl-base-1.0" - >>> adapter = T2IAdapter.from_pretrained("Adapter/t2iadapter", subfolder='sketch_sdxl_1.0',torch_dtype=torch.float16, adapter_type="full_adapter_xl") + >>> adapter = T2IAdapter.from_pretrained( + ... "Adapter/t2iadapter", + ... subfolder="sketch_sdxl_1.0", + ... torch_dtype=torch.float16, + ... adapter_type="full_adapter_xl", + ... 
) >>> scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler") >>> pipe = StableDiffusionXLAdapterPipeline.from_pretrained( - model_id, adapter=adapter, torch_dtype=torch.float16, variant="fp16", scheduler=scheduler - ) - - >>> pipe.to('cuda') - >>> generator = torch.Generator().manual_seed(42) - >>> sketch_image_out = pipe(prompt='a photo of a dog in real world, high quality', negative_prompt='extra digit, fewer digits, cropped, worst quality, low quality', image=sketch_image, generator=generator, guidance_scale=7.5).images[0] + ... model_id, adapter=adapter, torch_dtype=torch.float16, variant="fp16", scheduler=scheduler + ... ).to("cuda") + + >>> generator = torch.manual_seed(42) + >>> sketch_image_out = pipe( + ... prompt="a photo of a dog in real world, high quality", + ... negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality", + ... image=sketch_image, + ... generator=generator, + ... guidance_scale=7.5, + ... ).images[0] ``` """ + def _preprocess_adapter_image(image, height, width): if isinstance(image, torch.Tensor): return image @@ -97,6 +106,7 @@ def _preprocess_adapter_image(image, height, width): ) return image + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ @@ -710,8 +720,8 @@ def __call__( The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionAdapterPipelineOutput`] instead - of a plain tuple. + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionAdapterPipelineOutput`] + instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. 
The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. @@ -759,7 +769,7 @@ def __call__( adapter_input = _preprocess_adapter_image(image, height, width).to(device) - original_size = original_size or (height, width) + original_size = original_size or (height, width) target_size = target_size or (height, width) # 1. Check inputs. Raise error if not correct diff --git a/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py b/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py index 1d50e156cbbc..3b33dc218377 100644 --- a/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py +++ b/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py @@ -1137,7 +1137,7 @@ def forward( cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) - # To support T2I-Adapter-XL + # To support T2I-Adapter-XL if is_adapter and len(down_block_additional_residuals) > 0: sample += down_block_additional_residuals.pop(0) diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 72ceae002125..5c75bb89bd26 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -901,8 +901,21 @@ def from_config(cls, *args, **kwargs): def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) - class StableDiffusionXLControlNetImg2ImgPipeline(metaclass=DummyObject): + backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + +class 
StableDiffusionXLAdapterPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): From 512e8b4097db69483054029b312550b569c9785d Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 23 Aug 2023 12:42:11 +0530 Subject: [PATCH 12/36] fix: copies. --- .../t2i_adapter/pipeline_stable_diffusion_xl_adapter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py index f1a1e64c206b..a9210bd8019f 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -219,6 +219,7 @@ def disable_vae_tiling(self): """ self.vae.disable_tiling() + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.enable_model_cpu_offload def enable_model_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. 
Compared @@ -240,7 +241,7 @@ def enable_model_cpu_offload(self, gpu_id=0): model_sequence = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) - model_sequence.extend([self.unet, self.vae, self.adapter]) + model_sequence.extend([self.unet, self.vae]) hook = None for cpu_offloaded_model in model_sequence: From db7e9b2f3e38d50f626393931f61d77463cf9a70 Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 2023 16:59:56 +0800 Subject: [PATCH 13/36] adapter settings --- src/diffusers/models/adapter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/models/adapter.py b/src/diffusers/models/adapter.py index 830b37af4175..0047b2db1a7e 100644 --- a/src/diffusers/models/adapter.py +++ b/src/diffusers/models/adapter.py @@ -279,7 +279,7 @@ class LightAdapter(nn.Module): def __init__( self, in_channels: int = 3, - channels: List[int] = [320, 640, 1280], + channels: List[int] = [320, 640, 1280, 1280], num_res_blocks: int = 4, downscale_factor: int = 8, ): From cc43a7ba631409747e42f368a57d41e20729c613 Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 2023 19:33:09 +0800 Subject: [PATCH 14/36] new test case --- .../test_stable_diffusion_adapter.py | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py index a4f522062e34..e2ea0b82d1f8 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py @@ -48,24 +48,34 @@ class AdapterTests: def get_dummy_components(self, adapter_type): torch.manual_seed(0) + if adapter_type == 'light_adapter': + channels = [32, 32, 32] + else: + channels = [32, 32, 32, 32] + torch.manual_seed(0) unet = UNet2DConditionModel( - block_out_channels=(32, 64), + block_out_channels=[32, 32, 32, 32], layers_per_block=2, 
sample_size=32, in_channels=4, out_channels=4, - down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + down_block_types=( + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "DownBlock2D", + ), + up_block_types= ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( - block_out_channels=[32, 64], + block_out_channels=[32, 32, 32, 32], in_channels=3, out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) From fa7a21877fe15dad5c52752c6976958b48def20c Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 2023 19:34:43 +0800 Subject: [PATCH 15/36] new test case --- .../stable_diffusion/test_stable_diffusion_adapter.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py index e2ea0b82d1f8..716f85ee47a1 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py @@ -186,11 +186,8 @@ def test_stable_diffusion_adapter_default_case(self): inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) - expected_slice = np.array([0.4858, 0.5500, 0.4278, 0.4669, 0.6184, 0.4322, 0.5010, 0.5033, 0.4746]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 class 
StableDiffusionLightAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase): @@ -206,11 +203,8 @@ def test_stable_diffusion_adapter_default_case(self): inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) - expected_slice = np.array([0.4965, 0.5548, 0.4330, 0.4771, 0.6226, 0.4382, 0.5037, 0.5071, 0.4782]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 class StableDiffusionMultiAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase): From dc203bd079929f303fbda0aa976dfc120d371f8f Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 23 Aug 2023 15:53:35 +0530 Subject: [PATCH 16/36] debugging --- src/diffusers/models/unet_2d_condition.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/diffusers/models/unet_2d_condition.py b/src/diffusers/models/unet_2d_condition.py index 17db42f302ea..cd860fe4eaec 100644 --- a/src/diffusers/models/unet_2d_condition.py +++ b/src/diffusers/models/unet_2d_condition.py @@ -966,6 +966,7 @@ def forward( encoder_attention_mask=encoder_attention_mask, ) # To support T2I-Adapter-XL + print(f"From UNet: {len(down_block_additional_residuals)}") if is_adapter and len(down_block_additional_residuals) > 0: sample += down_block_additional_residuals.pop(0) From 708ee6af95e445c1911173a5bb2d95d54ce6e767 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 23 Aug 2023 15:58:51 +0530 Subject: [PATCH 17/36] debugging --- src/diffusers/models/unet_2d_condition.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/diffusers/models/unet_2d_condition.py b/src/diffusers/models/unet_2d_condition.py index cd860fe4eaec..d6c8f555c898 100644 --- a/src/diffusers/models/unet_2d_condition.py +++ b/src/diffusers/models/unet_2d_condition.py @@ -920,6 +920,7 @@ def forward( is_adapter = mid_block_additional_residual is None and down_block_additional_residuals is not None down_block_res_samples = (sample,) 
+ print(f"From UNet: {len(down_block_additional_residuals)}") for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: # For t2i-adapter CrossAttnDownBlock2D From 4877a70b7cdc3f97a5de7f737c226c1baec97f59 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 23 Aug 2023 16:00:46 +0530 Subject: [PATCH 18/36] debugging --- src/diffusers/models/adapter.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/diffusers/models/adapter.py b/src/diffusers/models/adapter.py index 0047b2db1a7e..fde404d27197 100644 --- a/src/diffusers/models/adapter.py +++ b/src/diffusers/models/adapter.py @@ -155,6 +155,7 @@ def __init__( downscale_factor: int = 8, ): super().__init__() + print(f"From {self.__class__.name} channels: {channels}.") in_channels = in_channels * downscale_factor**2 @@ -284,6 +285,7 @@ def __init__( downscale_factor: int = 8, ): super().__init__() + print(f"From {self.__class__.name} channels: {channels}.") in_channels = in_channels * downscale_factor**2 From c518df55f8a6abbc23fbb2a4c72b3d9876a8ab22 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 23 Aug 2023 16:01:35 +0530 Subject: [PATCH 19/36] debugging --- src/diffusers/models/adapter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/diffusers/models/adapter.py b/src/diffusers/models/adapter.py index fde404d27197..146dae8abe8a 100644 --- a/src/diffusers/models/adapter.py +++ b/src/diffusers/models/adapter.py @@ -155,7 +155,7 @@ def __init__( downscale_factor: int = 8, ): super().__init__() - print(f"From {self.__class__.name} channels: {channels}.") + print(f"From {self.__class__} channels: {channels}.") in_channels = in_channels * downscale_factor**2 @@ -285,7 +285,7 @@ def __init__( downscale_factor: int = 8, ): super().__init__() - print(f"From {self.__class__.name} channels: {channels}.") + print(f"From {self.__class__} channels: {channels}.") in_channels = in_channels * downscale_factor**2 From 
3280104425df4c93cd8f8045b24c6552ff15655b Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 23 Aug 2023 16:13:34 +0530 Subject: [PATCH 20/36] debugging --- src/diffusers/models/unet_2d_condition.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/diffusers/models/unet_2d_condition.py b/src/diffusers/models/unet_2d_condition.py index d6c8f555c898..4ee7fbe6ecce 100644 --- a/src/diffusers/models/unet_2d_condition.py +++ b/src/diffusers/models/unet_2d_condition.py @@ -920,7 +920,7 @@ def forward( is_adapter = mid_block_additional_residual is None and down_block_additional_residuals is not None down_block_res_samples = (sample,) - print(f"From UNet: {len(down_block_additional_residuals)}") + print(f"From UNet before down blocks: {len(down_block_additional_residuals)}") for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: # For t2i-adapter CrossAttnDownBlock2D @@ -967,7 +967,7 @@ def forward( encoder_attention_mask=encoder_attention_mask, ) # To support T2I-Adapter-XL - print(f"From UNet: {len(down_block_additional_residuals)}") + print(f"From UNet in mid block: {len(down_block_additional_residuals)}") if is_adapter and len(down_block_additional_residuals) > 0: sample += down_block_additional_residuals.pop(0) From 0dc053f9c64844bf30287d680564091ff9460d90 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 23 Aug 2023 16:19:56 +0530 Subject: [PATCH 21/36] debugging --- .../pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py index 1ee6f9296d5a..53d71bcea39b 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py @@ -741,6 +741,7 @@ def __call__( 
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual + print(f"From pipeline: {len(adapter_state)}.") noise_pred = self.unet( latent_model_input, t, From 5ceed5fcdbc1c342dbfd6f15181b2c7d22ebc104 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 23 Aug 2023 16:25:35 +0530 Subject: [PATCH 22/36] debugging --- .../pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py index 53d71bcea39b..9ebf59f5ed67 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py @@ -724,6 +724,7 @@ def __call__( # 7. Denoising loop adapter_state = self.adapter(adapter_input) + print(f"From pipeline (before rejigging): {len(adapter_state)}.") for k, v in enumerate(adapter_state): adapter_state[k] = v * adapter_conditioning_scale if num_images_per_prompt > 1: @@ -732,6 +733,7 @@ def __call__( if do_classifier_free_guidance: for k, v in enumerate(adapter_state): adapter_state[k] = torch.cat([v] * 2, dim=0) + print(f"From pipeline (after rejigging): {len(adapter_state)}.") num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: @@ -741,7 +743,6 @@ def __call__( latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual - print(f"From pipeline: {len(adapter_state)}.") noise_pred = self.unet( latent_model_input, t, From ae98b485c4f2a45b2b0a063e61a0f3a1e4a34196 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 23 Aug 2023 16:30:51 +0530 Subject: [PATCH 23/36] debugging --- src/diffusers/models/adapter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/src/diffusers/models/adapter.py b/src/diffusers/models/adapter.py index 146dae8abe8a..ae9ccf83e04f 100644 --- a/src/diffusers/models/adapter.py +++ b/src/diffusers/models/adapter.py @@ -183,7 +183,7 @@ def forward(self, x: torch.Tensor) -> List[torch.Tensor]: for block in self.body: x = block(x) features.append(x) - + print(f"Number of features: {len(features)}") return features @@ -312,7 +312,7 @@ def forward(self, x): for block in self.body: x = block(x) features.append(x) - + print(f"Number of features: {len(features)}") return features From d595689c257d389f2bb6a9a66dd2bfd732f2265a Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 23 Aug 2023 17:01:25 +0530 Subject: [PATCH 24/36] revert prints. --- src/diffusers/models/adapter.py | 5 ++--- src/diffusers/models/unet_2d_condition.py | 2 -- .../t2i_adapter/pipeline_stable_diffusion_adapter.py | 2 -- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/src/diffusers/models/adapter.py b/src/diffusers/models/adapter.py index ae9ccf83e04f..95193de868bc 100644 --- a/src/diffusers/models/adapter.py +++ b/src/diffusers/models/adapter.py @@ -155,7 +155,6 @@ def __init__( downscale_factor: int = 8, ): super().__init__() - print(f"From {self.__class__} channels: {channels}.") in_channels = in_channels * downscale_factor**2 @@ -183,7 +182,7 @@ def forward(self, x: torch.Tensor) -> List[torch.Tensor]: for block in self.body: x = block(x) features.append(x) - print(f"Number of features: {len(features)}") + return features @@ -312,7 +311,7 @@ def forward(self, x): for block in self.body: x = block(x) features.append(x) - print(f"Number of features: {len(features)}") + return features diff --git a/src/diffusers/models/unet_2d_condition.py b/src/diffusers/models/unet_2d_condition.py index 4ee7fbe6ecce..17db42f302ea 100644 --- a/src/diffusers/models/unet_2d_condition.py +++ b/src/diffusers/models/unet_2d_condition.py @@ -920,7 +920,6 @@ def forward( is_adapter = mid_block_additional_residual is None and 
down_block_additional_residuals is not None down_block_res_samples = (sample,) - print(f"From UNet before down blocks: {len(down_block_additional_residuals)}") for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: # For t2i-adapter CrossAttnDownBlock2D @@ -967,7 +966,6 @@ def forward( encoder_attention_mask=encoder_attention_mask, ) # To support T2I-Adapter-XL - print(f"From UNet in mid block: {len(down_block_additional_residuals)}") if is_adapter and len(down_block_additional_residuals) > 0: sample += down_block_additional_residuals.pop(0) diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py index 9ebf59f5ed67..1ee6f9296d5a 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py @@ -724,7 +724,6 @@ def __call__( # 7. 
Denoising loop adapter_state = self.adapter(adapter_input) - print(f"From pipeline (before rejigging): {len(adapter_state)}.") for k, v in enumerate(adapter_state): adapter_state[k] = v * adapter_conditioning_scale if num_images_per_prompt > 1: @@ -733,7 +732,6 @@ def __call__( if do_classifier_free_guidance: for k, v in enumerate(adapter_state): adapter_state[k] = torch.cat([v] * 2, dim=0) - print(f"From pipeline (after rejigging): {len(adapter_state)}.") num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: From 7a570b661c5ed0eb8014e77c50965689aa335f23 Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 2023 19:37:57 +0800 Subject: [PATCH 25/36] new test case --- src/diffusers/models/adapter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/models/adapter.py b/src/diffusers/models/adapter.py index 95193de868bc..9ccd5e69c562 100644 --- a/src/diffusers/models/adapter.py +++ b/src/diffusers/models/adapter.py @@ -279,7 +279,7 @@ class LightAdapter(nn.Module): def __init__( self, in_channels: int = 3, - channels: List[int] = [320, 640, 1280, 1280], + channels: List[int] = [320, 640, 1280], num_res_blocks: int = 4, downscale_factor: int = 8, ): From e1c60a1a10b14d6be8169788b67bace167c52d77 Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 2023 19:46:19 +0800 Subject: [PATCH 26/36] remove print --- src/diffusers/models/adapter.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/diffusers/models/adapter.py b/src/diffusers/models/adapter.py index 9ccd5e69c562..26b5b6592b79 100644 --- a/src/diffusers/models/adapter.py +++ b/src/diffusers/models/adapter.py @@ -284,7 +284,6 @@ def __init__( downscale_factor: int = 8, ): super().__init__() - print(f"From {self.__class__} channels: {channels}.") in_channels = in_channels * downscale_factor**2 From 8e78422e181bda43ffbb6fa6657848123fa37996 Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 
2023 20:25:06 +0800 Subject: [PATCH 27/36] org test case --- src/diffusers/models/unet_2d_condition.py | 2 +- .../test_stable_diffusion_adapter.py | 30 ++++++++----------- 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/src/diffusers/models/unet_2d_condition.py b/src/diffusers/models/unet_2d_condition.py index 17db42f302ea..1baa8296504b 100644 --- a/src/diffusers/models/unet_2d_condition.py +++ b/src/diffusers/models/unet_2d_condition.py @@ -966,7 +966,7 @@ def forward( encoder_attention_mask=encoder_attention_mask, ) # To support T2I-Adapter-XL - if is_adapter and len(down_block_additional_residuals) > 0: + if is_adapter and len(down_block_additional_residuals) > 0 and sample.shape == down_block_additional_residuals[0].shape: sample += down_block_additional_residuals.pop(0) if is_controlnet: diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py index 716f85ee47a1..9a9eb547de18 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py @@ -48,34 +48,24 @@ class AdapterTests: def get_dummy_components(self, adapter_type): torch.manual_seed(0) - if adapter_type == 'light_adapter': - channels = [32, 32, 32] - else: - channels = [32, 32, 32, 32] - torch.manual_seed(0) unet = UNet2DConditionModel( - block_out_channels=[32, 32, 32, 32], + block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, - down_block_types=( - "CrossAttnDownBlock2D", - "CrossAttnDownBlock2D", - "CrossAttnDownBlock2D", - "DownBlock2D", - ), - up_block_types= ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"), + down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( - 
block_out_channels=[32, 32, 32, 32], + block_out_channels=[32, 64], in_channels=3, out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) @@ -186,8 +176,11 @@ def test_stable_diffusion_adapter_default_case(self): inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4858, 0.5500, 0.4278, 0.4669, 0.6184, 0.4322, 0.5010, 0.5033, 0.4746]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 class StableDiffusionLightAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase): @@ -203,8 +196,11 @@ def test_stable_diffusion_adapter_default_case(self): inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4965, 0.5548, 0.4330, 0.4771, 0.6226, 0.4382, 0.5037, 0.5071, 0.4782]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 class StableDiffusionMultiAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase): @@ -559,4 +555,4 @@ def test_stable_diffusion_adapter_pipeline_with_sequential_cpu_offloading(self): pipe(prompt="foo", image=image, num_inference_steps=2) mem_bytes = torch.cuda.max_memory_allocated() - assert mem_bytes < 5 * 10**9 + assert mem_bytes < 5 * 10**9 \ No newline at end of file From 9cc021ab9fee33dd87269346f42d062e70ce9d31 Mon Sep 17 00:00:00 2001 From: Chong Date: Wed, 23 Aug 2023 22:13:27 +0800 Subject: [PATCH 28/36] add test_pipeline --- .../test_stable_diffusion_xl_adapter.py | 149 ++++++++++++++++++ 1 
file changed, 149 insertions(+) create mode 100644 tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py new file mode 100644 index 000000000000..57b4738cebbc --- /dev/null +++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py @@ -0,0 +1,149 @@ +# coding=utf-8 +# Copyright 2023 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import random +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + EulerDiscreteScheduler, + StableDiffusionXLAdapterPipeline, + UNet2DConditionModel, + T2IAdapter +) +from diffusers.utils import floats_tensor +from diffusers.utils.testing_utils import enable_full_determinism +from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + +class StableDiffusionXLAdapterPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = StableDiffusionXLAdapterPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + 
layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + adapter = T2IAdapter( + in_channels=3, + channels=[32, 64], + num_res_blocks=2, + downscale_factor=4, + adapter_type='full_adapter_xl', + ) + components = { + "adapter": adapter, + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + # "safety_checker": None, + # "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "numpy", + } + return inputs + + def test_stable_diffusion_adapter_default_case(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLAdapterPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.5752919, 0.6022097, 0.4728038, 0.49861962, 0.57084894, 0.4644975, 0.5193715, 0.5133664, 
0.4729858]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 \ No newline at end of file From 714a39b3095f94f263d95e28815fc8cbefadee19 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 23 Aug 2023 20:09:48 +0530 Subject: [PATCH 29/36] styling. --- src/diffusers/models/adapter.py | 4 ++-- src/diffusers/models/unet_2d_condition.py | 6 +++++- .../test_stable_diffusion_adapter.py | 2 +- .../test_stable_diffusion_xl_adapter.py | 14 +++++++++----- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/diffusers/models/adapter.py b/src/diffusers/models/adapter.py index 26b5b6592b79..830b37af4175 100644 --- a/src/diffusers/models/adapter.py +++ b/src/diffusers/models/adapter.py @@ -182,7 +182,7 @@ def forward(self, x: torch.Tensor) -> List[torch.Tensor]: for block in self.body: x = block(x) features.append(x) - + return features @@ -310,7 +310,7 @@ def forward(self, x): for block in self.body: x = block(x) features.append(x) - + return features diff --git a/src/diffusers/models/unet_2d_condition.py b/src/diffusers/models/unet_2d_condition.py index 1baa8296504b..1994649f4c59 100644 --- a/src/diffusers/models/unet_2d_condition.py +++ b/src/diffusers/models/unet_2d_condition.py @@ -966,7 +966,11 @@ def forward( encoder_attention_mask=encoder_attention_mask, ) # To support T2I-Adapter-XL - if is_adapter and len(down_block_additional_residuals) > 0 and sample.shape == down_block_additional_residuals[0].shape: + if ( + is_adapter + and len(down_block_additional_residuals) > 0 + and sample.shape == down_block_additional_residuals[0].shape + ): sample += down_block_additional_residuals.pop(0) if is_controlnet: diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py index 9a9eb547de18..a4f522062e34 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py @@ -555,4 
+555,4 @@ def test_stable_diffusion_adapter_pipeline_with_sequential_cpu_offloading(self): pipe(prompt="foo", image=image, num_inference_steps=2) mem_bytes = torch.cuda.max_memory_allocated() - assert mem_bytes < 5 * 10**9 \ No newline at end of file + assert mem_bytes < 5 * 10**9 diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py index 57b4738cebbc..ad8dc99ccec8 100644 --- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py +++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py @@ -24,17 +24,19 @@ AutoencoderKL, EulerDiscreteScheduler, StableDiffusionXLAdapterPipeline, + T2IAdapter, UNet2DConditionModel, - T2IAdapter ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import enable_full_determinism -from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS + +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() + class StableDiffusionXLAdapterPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionXLAdapterPipeline params = TEXT_TO_IMAGE_PARAMS @@ -101,7 +103,7 @@ def get_dummy_components(self): channels=[32, 64], num_res_blocks=2, downscale_factor=4, - adapter_type='full_adapter_xl', + adapter_type="full_adapter_xl", ) components = { "adapter": adapter, @@ -145,5 +147,7 @@ def test_stable_diffusion_adapter_default_case(self): image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) - expected_slice = np.array([0.5752919, 0.6022097, 0.4728038, 0.49861962, 0.57084894, 0.4644975, 0.5193715, 0.5133664, 0.4729858]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 \ No newline at end of file + expected_slice = np.array( + [0.5752919, 0.6022097, 0.4728038, 0.49861962, 0.57084894, 0.4644975, 
0.5193715, 0.5133664, 0.4729858] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 From 7bb5d5be8b2c5eed7535e90a3dc0c478d670be87 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 23 Aug 2023 20:10:34 +0530 Subject: [PATCH 30/36] fix copies. --- .../pipelines/versatile_diffusion/modeling_text_unet.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py b/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py index 3b33dc218377..3fd9695c2d43 100644 --- a/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py +++ b/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py @@ -1138,7 +1138,11 @@ def forward( encoder_attention_mask=encoder_attention_mask, ) # To support T2I-Adapter-XL - if is_adapter and len(down_block_additional_residuals) > 0: + if ( + is_adapter + and len(down_block_additional_residuals) > 0 + and sample.shape == down_block_additional_residuals[0].shape + ): sample += down_block_additional_residuals.pop(0) if is_controlnet: From b35979393ef37809dc69b4515317acebfb36edd6 Mon Sep 17 00:00:00 2001 From: Chong Date: Thu, 24 Aug 2023 01:01:31 +0800 Subject: [PATCH 31/36] modify test parameter --- .../stable_diffusion_xl/test_stable_diffusion_xl_adapter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py index ad8dc99ccec8..3bd4dca4e26c 100644 --- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py +++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py @@ -30,7 +30,7 @@ from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import enable_full_determinism -from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..pipeline_params import 
TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS from ..test_pipelines_common import PipelineTesterMixin @@ -39,8 +39,8 @@ class StableDiffusionXLAdapterPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionXLAdapterPipeline - params = TEXT_TO_IMAGE_PARAMS - batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) From 5d73983e596b87344b7303a4579a099bfb1549e8 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Thu, 24 Aug 2023 07:44:30 +0530 Subject: [PATCH 32/36] style. --- .../stable_diffusion_xl/test_stable_diffusion_xl_adapter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py index 3bd4dca4e26c..afe7da3319c7 100644 --- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py +++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py @@ -30,7 +30,7 @@ from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import enable_full_determinism -from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS +from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin From 297e776f1b4fc722132416fb1063c33e52d1bae4 Mon Sep 17 00:00:00 2001 From: Chong Date: Mon, 28 Aug 2023 15:44:27 +0800 Subject: [PATCH 33/36] add adapter-xl doc --- .../api/pipelines/stable_diffusion/adapter.md | 75 ++++++++++++++++++- 1 file changed, 73 insertions(+), 2 deletions(-) diff --git a/docs/source/en/api/pipelines/stable_diffusion/adapter.md b/docs/source/en/api/pipelines/stable_diffusion/adapter.md index 75b4f186e6be..24f50ca80086 
100644 --- a/docs/source/en/api/pipelines/stable_diffusion/adapter.md +++ b/docs/source/en/api/pipelines/stable_diffusion/adapter.md @@ -29,10 +29,11 @@ This model was contributed by the community contributor [HimariO](https://github | Pipeline | Tasks | Demo |---|---|:---:| | [StableDiffusionAdapterPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_adapter.py) | *Text-to-Image Generation with T2I-Adapter Conditioning* | - +| [StableDiffusionXLAdapterPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_xl_adapter.py) | *Text-to-Image Generation with T2I-Adapter Conditioning on StableDiffusion-XL* | - -## Usage example +## Usage example with the base model of StableDiffusion-1.4/1.5 -In the following we give a simple example of how to use a *T2IAdapter* checkpoint with Diffusers for inference. +In the following we give a simple example of how to use a *T2IAdapter* checkpoint with Diffusers for inference based on StableDiffusion-1.4/1.5. All adapters use the same pipeline. 1. Images are first converted into the appropriate *control image* format. @@ -93,6 +94,62 @@ out_image = pipe( ![img](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/color_output.png) +## Usage example with the base model of StableDiffusion-XL + +In the following we give a simple example of how to use a *T2IAdapter* checkpoint with Diffusers for inference based on StableDiffusion-XL. +All adapters use the same pipeline. + + 1. Images are first downloaded into the appropriate *control image* format. + 2. The *control image* and *prompt* are passed to the [`StableDiffusionXLAdapterPipeline`]. + +Let's have a look at a simple example using the [Sketch Adapter](https://huggingface.co/Adapter/t2iadapter/tree/main/sketch_sdxl_1.0). 
+ +```python +from diffusers.utils import load_image + +sketch_image = load_image('https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch.png').convert('L') +``` + +![img](https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch.png) + +Then, create the adapter pipeline + +```py +import torch +from diffusers import ( + T2IAdapter, + StableDiffusionXLAdapterPipeline, + DDPMScheduler +) +from diffusers.models.unet_2d_condition import UNet2DConditionModel + +model_id = 'stabilityai/stable-diffusion-xl-base-1.0' +adapter = T2IAdapter.from_pretrained("Adapter/t2iadapter", subfolder='sketch_sdxl_1.0',torch_dtype=torch.float16, adapter_type="full_adapter_xl") +scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler") + +pipe = StableDiffusionXLAdapterPipeline.from_pretrained( + model_id, adapter=adapter, safety_checker=None, torch_dtype=torch.float16, variant="fp16", scheduler=scheduler +) + +pipe.to('cuda') +``` + +Finally, pass the prompt and control image to the pipeline + +```py +# fix the random seed, so you will get the same result as the example +generator = torch.Generator().manual_seed(42) + +sketch_image_out = pipe( + prompt='a photo of a dog in real world, high quality', + negative_prompt='extra digit, fewer digits, cropped, worst quality, low quality', + image=sketch_image, + generator=generator, + guidance_scale=7.5 +).images[0] +``` + +![img](https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch_output.png) ## Available checkpoints @@ -113,6 +170,9 @@ Non-diffusers checkpoints can be found under [TencentARC/T2I-Adapter](https://hu |[TencentARC/t2iadapter_depth_sd15v2](https://huggingface.co/TencentARC/t2iadapter_depth_sd15v2)|| |[TencentARC/t2iadapter_sketch_sd15v2](https://huggingface.co/TencentARC/t2iadapter_sketch_sd15v2)|| |[TencentARC/t2iadapter_zoedepth_sd15v1](https://huggingface.co/TencentARC/t2iadapter_zoedepth_sd15v1)|| +|[Adapter/t2iadapter, 
subfolder='sketch_sdxl_1.0'](https://huggingface.co/Adapter/t2iadapter/tree/main/sketch_sdxl_1.0)|| +|[Adapter/t2iadapter, subfolder='canny_sdxl_1.0'](https://huggingface.co/Adapter/t2iadapter/tree/main/canny_sdxl_1.0)|| +|[Adapter/t2iadapter, subfolder='openpose_sdxl_1.0'](https://huggingface.co/Adapter/t2iadapter/tree/main/openpose_sdxl_1.0)|| ## Combining multiple adapters @@ -185,3 +245,14 @@ However, T2I-Adapter performs slightly worse than ControlNet. - disable_vae_slicing - enable_xformers_memory_efficient_attention - disable_xformers_memory_efficient_attention + +## StableDiffusionXLAdapterPipeline +[[autodoc]] StableDiffusionXLAdapterPipeline + - all + - __call__ + - enable_attention_slicing + - disable_attention_slicing + - enable_vae_slicing + - disable_vae_slicing + - enable_xformers_memory_efficient_attention + - disable_xformers_memory_efficient_attention From 7857fa95988d3c59d3410bdd97c07db8eb235dc5 Mon Sep 17 00:00:00 2001 From: Chong Date: Mon, 28 Aug 2023 16:58:31 +0800 Subject: [PATCH 34/36] double quotes in docs --- .../en/api/pipelines/stable_diffusion/adapter.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/source/en/api/pipelines/stable_diffusion/adapter.md b/docs/source/en/api/pipelines/stable_diffusion/adapter.md index 24f50ca80086..4c7415ddb02b 100644 --- a/docs/source/en/api/pipelines/stable_diffusion/adapter.md +++ b/docs/source/en/api/pipelines/stable_diffusion/adapter.md @@ -107,7 +107,7 @@ Let's have a look at a simple example using the [Sketch Adapter](https://hugging ```python from diffusers.utils import load_image -sketch_image = load_image('https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch.png').convert('L') +sketch_image = load_image("https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch.png").convert("L") ``` ![img](https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch.png) @@ -123,15 +123,15 @@ from diffusers import ( ) from diffusers.models.unet_2d_condition 
import UNet2DConditionModel -model_id = 'stabilityai/stable-diffusion-xl-base-1.0' -adapter = T2IAdapter.from_pretrained("Adapter/t2iadapter", subfolder='sketch_sdxl_1.0',torch_dtype=torch.float16, adapter_type="full_adapter_xl") +model_id = "stabilityai/stable-diffusion-xl-base-1.0" +adapter = T2IAdapter.from_pretrained("Adapter/t2iadapter", subfolder="sketch_sdxl_1.0",torch_dtype=torch.float16, adapter_type="full_adapter_xl") scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler") pipe = StableDiffusionXLAdapterPipeline.from_pretrained( model_id, adapter=adapter, safety_checker=None, torch_dtype=torch.float16, variant="fp16", scheduler=scheduler ) -pipe.to('cuda') +pipe.to("cuda") ``` Finally, pass the prompt and control image to the pipeline @@ -141,8 +141,8 @@ Finally, pass the prompt and control image to the pipeline generator = torch.Generator().manual_seed(42) sketch_image_out = pipe( - prompt='a photo of a dog in real world, high quality', - negative_prompt='extra digit, fewer digits, cropped, worst quality, low quality', + prompt="a photo of a dog in real world, high quality", + negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality", image=sketch_image, generator=generator, guidance_scale=7.5 From 9b502fd22bc7885613e50715e50a4c63e181fbf1 Mon Sep 17 00:00:00 2001 From: Chong Date: Tue, 29 Aug 2023 11:35:16 +0800 Subject: [PATCH 35/36] Fix potential type mismatch --- .../pipeline_stable_diffusion_xl_adapter.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py index a9210bd8019f..733214d55658 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -920,14 +920,20 @@ def __call__( progress_bar.update() if callback 
is not None and i % callback_steps == 0: callback(i, t, latents) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast - # make sure the VAE is in float32 mode, as it overflows in float16 - if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: - self.upcast_vae() - latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) - if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) else: image = latents return StableDiffusionXLPipelineOutput(images=image) From d5c901d57d9a6acb84318184cba0b37380deefc3 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Tue, 29 Aug 2023 10:15:03 +0530 Subject: [PATCH 36/36] style. 
--- src/diffusers/__init__.py | 2 +- .../t2i_adapter/pipeline_stable_diffusion_xl_adapter.py | 2 +- .../utils/dummy_torch_and_transformers_objects.py | 8 +++++--- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index b4f4e5a2407b..652356d1893c 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -191,8 +191,8 @@ StableDiffusionPix2PixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, - StableDiffusionXLControlNetImg2ImgPipeline, StableDiffusionXLAdapterPipeline, + StableDiffusionXLControlNetImg2ImgPipeline, StableDiffusionXLControlNetPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py index 733214d55658..6311c02be475 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -920,7 +920,7 @@ def __call__( progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) - + if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 5c75bb89bd26..11169d12e212 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -901,8 +901,9 @@ def from_config(cls, *args, **kwargs): def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) -class StableDiffusionXLControlNetImg2ImgPipeline(metaclass=DummyObject): - backends = ["torch", "transformers"] + 
+class StableDiffusionXLAdapterPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @@ -915,7 +916,8 @@ def from_config(cls, *args, **kwargs): def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) -class StableDiffusionXLAdapterPipeline(metaclass=DummyObject): + +class StableDiffusionXLControlNetImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs):