1 file changed: 0 additions, 4 deletions

@@ -222,9 +222,6 @@ def set_use_memory_efficient_attention_xformers(
                 )
                 processor.load_state_dict(self.processor.state_dict())
                 processor.to(self.processor.to_q_lora.up.weight.device)
-                print(
-                    f"is_lora is set to {is_lora}, type: LoRAXFormersAttnProcessor: {isinstance(processor, LoRAXFormersAttnProcessor)}"
-                )
             elif is_custom_diffusion:
                 processor = CustomDiffusionXFormersAttnProcessor(
                     train_kv=self.processor.train_kv,
@@ -262,7 +259,6 @@ def set_use_memory_efficient_attention_xformers(
             # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
             # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
             # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
-            print("Still defaulting to: AttnProcessor2_0 :O")
             processor = (
                 AttnProcessor2_0()
                 if hasattr(F, "scaled_dot_product_attention") and self.scale_qk
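For context, the comment retained in the second hunk explains when AttnProcessor2_0 is picked. Below is a minimal standalone sketch of that same selection check, assuming AttnProcessor and AttnProcessor2_0 are importable from diffusers.models.attention_processor and using a local scale_qk flag in place of the module's self.scale_qk:

    # Minimal sketch of the default-selection logic described in the comment
    # above: AttnProcessor2_0 (which wraps torch's native
    # scaled_dot_product_attention) is chosen only when torch 2.x provides
    # that function and `scale_qk` is left at its default.
    import torch.nn.functional as F

    from diffusers.models.attention_processor import AttnProcessor, AttnProcessor2_0

    scale_qk = True  # stands in for `self.scale_qk` from the diff

    processor = (
        AttnProcessor2_0()
        if hasattr(F, "scaled_dot_product_attention") and scale_qk
        else AttnProcessor()
    )
    print(type(processor).__name__)  # "AttnProcessor2_0" on torch >= 2.0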
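Separately, if the information from the removed prints is still wanted during debugging, one alternative (not part of this change) is to route it through diffusers' logging utilities instead of stdout. The sketch below stubs is_lora and processor, which are defined inside the real function, and assumes diffusers.utils.logging exposes get_logger and set_verbosity_debug as in transformers:

    # Hypothetical debug logging in place of the removed print() calls.
    from diffusers.utils import logging

    logger = logging.get_logger(__name__)
    logging.set_verbosity_debug()  # make logger.debug output visible

    is_lora = True        # stub: computed inside the real function
    processor = object()  # stub: the processor instance built in the function

    logger.debug("is_lora=%s, processor type=%s", is_lora, type(processor).__name__)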