Commit c6ae883

remove print statements from attention processor. (#3592)
1 parent 5559d04 commit c6ae883

1 file changed: +0 −4 lines changed

src/diffusers/models/attention_processor.py

Lines changed: 0 additions & 4 deletions
@@ -222,9 +222,6 @@ def set_use_memory_efficient_attention_xformers(
                 )
                 processor.load_state_dict(self.processor.state_dict())
                 processor.to(self.processor.to_q_lora.up.weight.device)
-                print(
-                    f"is_lora is set to {is_lora}, type: LoRAXFormersAttnProcessor: {isinstance(processor, LoRAXFormersAttnProcessor)}"
-                )
             elif is_custom_diffusion:
                 processor = CustomDiffusionXFormersAttnProcessor(
                     train_kv=self.processor.train_kv,
@@ -262,7 +259,6 @@ def set_use_memory_efficient_attention_xformers(
             # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
             # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
             # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
-            print("Still defaulting to: AttnProcessor2_0 :O")
             processor = (
                 AttnProcessor2_0()
                 if hasattr(F, "scaled_dot_product_attention") and self.scale_qk
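The comment in the second hunk describes the default-processor logic this diff touches: when torch 2.x provides torch.nn.functional.scaled_dot_product_attention and scale_qk is left at its default, the module falls back to AttnProcessor2_0 rather than the plain AttnProcessor. Below is a minimal standalone sketch of that selection; the stub classes and the pick_default_processor helper are illustrative stand-ins, not the diffusers library's actual implementation.

```python
# Minimal sketch of the default-processor selection described in the diff's
# comments. The stub classes and the `scale_qk` flag are stand-ins for the
# real diffusers classes; only the hasattr(F, ...) check mirrors the diff.
import torch.nn.functional as F


class AttnProcessor:
    """Stand-in for the eager-mode attention processor."""


class AttnProcessor2_0:
    """Stand-in for the processor built on F.scaled_dot_product_attention."""


def pick_default_processor(scale_qk: bool = True):
    # Use the torch 2.x fused attention path only when it exists and the
    # default `scale` behaviour (scale_qk=True) is in effect.
    return (
        AttnProcessor2_0()
        if hasattr(F, "scaled_dot_product_attention") and scale_qk
        else AttnProcessor()
    )


print(type(pick_default_processor()).__name__)  # AttnProcessor2_0 on torch >= 2.0
```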
