diff --git a/src/diffusers/loaders/lora_base.py b/src/diffusers/loaders/lora_base.py index 4497d57d545c..17ed8c5444fc 100644 --- a/src/diffusers/loaders/lora_base.py +++ b/src/diffusers/loaders/lora_base.py @@ -423,8 +423,12 @@ def _load_lora_into_text_encoder( # Unsafe code /> if prefix is not None and not state_dict: - logger.info( - f"No LoRA keys associated to {text_encoder.__class__.__name__} found with the {prefix=}. This is safe to ignore if LoRA state dict didn't originally have any {text_encoder.__class__.__name__} related params. Open an issue if you think it's unexpected: https://github.com/huggingface/diffusers/issues/new" + logger.warning( + f"No LoRA keys associated to {text_encoder.__class__.__name__} found with the {prefix=}. " + "This is safe to ignore if LoRA state dict didn't originally have any " + f"{text_encoder.__class__.__name__} related params. You can also try specifying `prefix=None` " + "to resolve the warning. Otherwise, open an issue if you think it's unexpected: " + "https://github.com/huggingface/diffusers/issues/new" ) diff --git a/src/diffusers/loaders/peft.py b/src/diffusers/loaders/peft.py index fe29738f02e6..74e51445cc1e 100644 --- a/src/diffusers/loaders/peft.py +++ b/src/diffusers/loaders/peft.py @@ -354,8 +354,12 @@ def load_lora_adapter(self, pretrained_model_name_or_path_or_dict, prefix="trans # Unsafe code /> if prefix is not None and not state_dict: - logger.info( - f"No LoRA keys associated to {self.__class__.__name__} found with the {prefix=}. This is safe to ignore if LoRA state dict didn't originally have any {self.__class__.__name__} related params. Open an issue if you think it's unexpected: https://github.com/huggingface/diffusers/issues/new" + logger.warning( + f"No LoRA keys associated to {self.__class__.__name__} found with the {prefix=}. " + "This is safe to ignore if LoRA state dict didn't originally have any " + f"{self.__class__.__name__} related params. You can also try specifying `prefix=None` " + "to resolve the warning. Otherwise, open an issue if you think it's unexpected: " + "https://github.com/huggingface/diffusers/issues/new" ) def save_lora_adapter( diff --git a/tests/lora/utils.py b/tests/lora/utils.py index df4adb9ee346..8cdb43c9d085 100644 --- a/tests/lora/utils.py +++ b/tests/lora/utils.py @@ -1961,7 +1961,7 @@ def test_logs_info_when_no_lora_keys_found(self): no_op_state_dict = {"lora_foo": torch.tensor(2.0), "lora_bar": torch.tensor(3.0)} logger = logging.get_logger("diffusers.loaders.peft") - logger.setLevel(logging.INFO) + logger.setLevel(logging.WARNING) with CaptureLogger(logger) as cap_logger: pipe.load_lora_weights(no_op_state_dict) @@ -1981,7 +1981,7 @@ def test_logs_info_when_no_lora_keys_found(self): prefix = "text_encoder_2" logger = logging.get_logger("diffusers.loaders.lora_base") - logger.setLevel(logging.INFO) + logger.setLevel(logging.WARNING) with CaptureLogger(logger) as cap_logger: self.pipeline_class.load_lora_into_text_encoder(