From 2750ccafaa3ae6d8f2553aee0ae90406c467a6dc Mon Sep 17 00:00:00 2001
From: Tuna Tuncer
Date: Fri, 11 Apr 2025 15:04:53 +0200
Subject: [PATCH] Fix incorrect tile_latent_min_width calculation in
 AutoencoderKLMochi

---
 src/diffusers/models/autoencoders/autoencoder_kl_mochi.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/diffusers/models/autoencoders/autoencoder_kl_mochi.py b/src/diffusers/models/autoencoders/autoencoder_kl_mochi.py
index f6d9b3bb4834..edf270f66e92 100644
--- a/src/diffusers/models/autoencoders/autoencoder_kl_mochi.py
+++ b/src/diffusers/models/autoencoders/autoencoder_kl_mochi.py
@@ -909,7 +909,7 @@ def encode(
     def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
         batch_size, num_channels, num_frames, height, width = z.shape
         tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
-        tile_latent_min_width = self.tile_sample_stride_width // self.spatial_compression_ratio
+        tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio

         if self.use_tiling and (width > tile_latent_min_width or height > tile_latent_min_height):
             return self.tiled_decode(z, return_dict=return_dict)
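
Note on the change: `_decode` compares the latent width against a per-tile minimum to decide whether to fall back to `tiled_decode`. The old line derived that threshold from `tile_sample_stride_width` (the stride between tiles) instead of `tile_sample_min_width` (the full tile size), so the width threshold was inconsistent with the height threshold computed one line above. The sketch below is not part of the patch; it only illustrates the effect using assumed example values (256 for the minimum tile width, 192 for the stride, a spatial compression ratio of 8), which may differ from the model's actual configuration.

```python
# Hypothetical values chosen for illustration; substitute the real config.
tile_sample_min_width = 256      # full tile size in pixel space
tile_sample_stride_width = 192   # stride between tiles in pixel space
spatial_compression_ratio = 8    # pixels per latent element

# Old (buggy) threshold: derived from the stride, not the tile size.
buggy_threshold = tile_sample_stride_width // spatial_compression_ratio  # 24 latent columns

# Fixed threshold: derived from the minimum tile width, matching how
# tile_latent_min_height is computed in _decode.
fixed_threshold = tile_sample_min_width // spatial_compression_ratio     # 32 latent columns

# A latent 28 columns wide fits in a single tile, but the buggy threshold
# would still route it through tiled decoding.
latent_width = 28
print(latent_width > buggy_threshold)   # True  -> tiled decode triggered unnecessarily
print(latent_width > fixed_threshold)   # False -> single-pass decode, as intended
```

With these assumed defaults, the practical effect of the bug is that latents between the two thresholds (25 to 32 columns in the example) would be decoded tile by tile even though they fit in one tile, doing extra blending work for no benefit.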