From d1a7a584c9d97d06adbf4dd43e51ddc9ea36ff83 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Sun, 9 Feb 2025 23:00:08 +0000 Subject: [PATCH 01/40] First commit - Adding Buffer and Config classes --- .../logging/buffer/__init__.py | 4 + aws_lambda_powertools/logging/buffer/cache.py | 91 ++++++++++ .../logging/buffer/config.py | 93 ++++++++++ tests/unit/logger/__init__.py | 0 tests/unit/logger/test_logger_buffer_cache.py | 166 ++++++++++++++++++ .../unit/logger/test_logger_buffer_config.py | 91 ++++++++++ 6 files changed, 445 insertions(+) create mode 100644 aws_lambda_powertools/logging/buffer/__init__.py create mode 100644 aws_lambda_powertools/logging/buffer/cache.py create mode 100644 aws_lambda_powertools/logging/buffer/config.py create mode 100644 tests/unit/logger/__init__.py create mode 100644 tests/unit/logger/test_logger_buffer_cache.py create mode 100644 tests/unit/logger/test_logger_buffer_config.py diff --git a/aws_lambda_powertools/logging/buffer/__init__.py b/aws_lambda_powertools/logging/buffer/__init__.py new file mode 100644 index 00000000000..912a3ca41a7 --- /dev/null +++ b/aws_lambda_powertools/logging/buffer/__init__.py @@ -0,0 +1,4 @@ +from aws_lambda_powertools.logging.buffer.cache import LoggerBufferCache +from aws_lambda_powertools.logging.buffer.config import LoggerBufferConfig + +__all__ = ["LoggerBufferCache", "LoggerBufferConfig"] diff --git a/aws_lambda_powertools/logging/buffer/cache.py b/aws_lambda_powertools/logging/buffer/cache.py new file mode 100644 index 00000000000..88ab977354e --- /dev/null +++ b/aws_lambda_powertools/logging/buffer/cache.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +import warnings +from collections import deque +from typing import Any + +from aws_lambda_powertools.warnings import PowertoolsUserWarning + + +class LoggerBufferCache: + def __init__(self, max_size_bytes: int): + """ + Initialize the LoggerBufferCache. + + Parameters + ---------- + max_size_bytes : int + Maximum size of the cache in bytes. + """ + self.max_size_bytes: int = max_size_bytes + self.cache: dict[str, deque] = {} + self.current_size: dict[str, int] = {} + + def add(self, key: str, item: Any) -> None: + """ + Add an item to the cache for a specific key. + + Parameters + ---------- + key : str + The key to store the item under. + item : Any + The item to be stored in the cache. + + Notes + ----- + If the item size exceeds the maximum cache size, it will not be added. + """ + item_size = len(str(item)) + + if item_size > self.max_size_bytes: + warnings.warn( + message=f"Item size {item_size} bytes exceeds total cache size {self.max_size_bytes} bytes", + category=PowertoolsUserWarning, + stacklevel=2, + ) + return + + if key not in self.cache: + self.cache[key] = deque() + self.current_size[key] = 0 + + while self.current_size[key] + item_size > self.max_size_bytes and self.cache[key]: + removed_item = self.cache[key].popleft() + self.current_size[key] -= len(str(removed_item)) + + self.cache[key].append(item) + self.current_size[key] += item_size + + def get(self, key: str) -> list: + """ + Retrieve items for a specific key. + + Parameters + ---------- + key : str + The key to retrieve items for. + + Returns + ------- + list + List of items for the given key, or an empty list if the key doesn't exist. + """ + return list(self.cache.get(key, deque())) + + def clear(self, key: str | None = None) -> None: + """ + Clear the cache, either for a specific key or entirely. + + Parameters + ---------- + key : str, optional + The key to clear. 
If None, clears the entire cache. + """ + if key: + if key in self.cache: + del self.cache[key] + del self.current_size[key] + else: + self.cache.clear() + self.current_size.clear() diff --git a/aws_lambda_powertools/logging/buffer/config.py b/aws_lambda_powertools/logging/buffer/config.py new file mode 100644 index 00000000000..0890ddee20f --- /dev/null +++ b/aws_lambda_powertools/logging/buffer/config.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +from typing import Literal + + +class LoggerBufferConfig: + """ + Configuration for log buffering behavior. + """ + + # Define a type alias for valid log levels + LogLevelType = Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] + + # Define class-level constant for valid log levels + VALID_LOG_LEVELS: list[str] = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] + + def __init__( + self, + max_size: int = 10240, + minimum_log_level: LogLevelType = "DEBUG", + flush_on_error: bool = True, + compress: bool = False, + ): + """ + Initialize logger buffer configuration. + + Parameters + ---------- + max_size : int, optional + Maximum size of the buffer in bytes + minimum_log_level : str, optional + Minimum log level to buffer + flush_on_error : bool, optional + Whether to flush the buffer when an error occurs + compress : bool, optional + Whether to compress buffered logs + """ + self._validate_inputs(max_size, minimum_log_level, flush_on_error, compress) + + self._max_size = max_size + self._minimum_log_level = minimum_log_level.upper() + self._flush_on_error = flush_on_error + self._compress = compress + + def _validate_inputs( + self, + max_size: int, + minimum_log_level: str, + flush_on_error: bool, + compress: bool, + ) -> None: + """ + Validate configuration inputs. + + Parameters + ---------- + Same as __init__ method parameters + """ + if not isinstance(max_size, int) or max_size <= 0: + raise ValueError("Max size must be a positive integer") + + if not isinstance(minimum_log_level, str): + raise ValueError("Log level must be a string") + + # Validate log level + if minimum_log_level.upper() not in self.VALID_LOG_LEVELS: + raise ValueError(f"Invalid log level. 
Must be one of {self.VALID_LOG_LEVELS}") + + if not isinstance(flush_on_error, bool): + raise ValueError("flush_on_error must be a boolean") + + if not isinstance(compress, bool): + raise ValueError("compress must be a boolean") + + @property + def max_size(self) -> int: + """Maximum buffer size in bytes.""" + return self._max_size + + @property + def minimum_log_level(self) -> str: + """Minimum log level to buffer.""" + return self._minimum_log_level + + @property + def flush_on_error(self) -> bool: + """Flag to flush buffer on error.""" + return self._flush_on_error + + @property + def compress(self) -> bool: + """Flag to compress buffered logs.""" + return self._compress diff --git a/tests/unit/logger/__init__.py b/tests/unit/logger/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/unit/logger/test_logger_buffer_cache.py b/tests/unit/logger/test_logger_buffer_cache.py new file mode 100644 index 00000000000..8682acd67aa --- /dev/null +++ b/tests/unit/logger/test_logger_buffer_cache.py @@ -0,0 +1,166 @@ +import pytest + +from aws_lambda_powertools.logging.buffer import LoggerBufferCache +from aws_lambda_powertools.warnings import PowertoolsUserWarning + + +def test_initialization(): + + # GIVEN a new instance of LoggerBufferCache + logger_cache = LoggerBufferCache(1000) + + # THEN cache should have correct initial state + assert logger_cache.max_size_bytes == 1000 + assert logger_cache.cache == {} + assert logger_cache.current_size == {} + + +def test_add_single_item(): + # GIVEN a new instance of LoggerBufferCache with 1024 bytes + logger_cache = LoggerBufferCache(1024) + + # WHEN a single item is added + logger_cache.add("key1", "test_item") + + # THEN item is stored correctly with proper size tracking + assert len(logger_cache.get("key1")) == 1 + assert logger_cache.get("key1")[0] == "test_item" + assert logger_cache.current_size["key1"] == len("test_item") + + +def test_add_multiple_items_same_key(): + # GIVEN a new instance of LoggerBufferCache with 1024 bytes + logger_cache = LoggerBufferCache(1024) + + # WHEN multiple items are added to the same key + logger_cache.add("key1", "item1") + logger_cache.add("key1", "item2") + + # THEN items are stored sequentially + assert len(logger_cache.get("key1")) == 2 + assert logger_cache.get("key1") == ["item1", "item2"] + + +def test_cache_size_limit_single_key(): + # GIVEN a new instance of LoggerBufferCache with small cache size + logger_cache = LoggerBufferCache(10) + + # WHEN multiple items are added + logger_cache.add("key1", "long_item1") + logger_cache.add("key1", "long_item2") + logger_cache.add("key1", "long_item3") + + # THEN cache maintains size limit for a single key + assert len(logger_cache.get("key1")) > 0 + assert logger_cache.current_size["key1"] <= 10 + + +def test_item_larger_than_cache(): + # GIVEN a new instance of LoggerBufferCache with small cache size + logger_cache = LoggerBufferCache(5) + + # WHEN an item larger than cache is added + with pytest.warns(expected_warning=PowertoolsUserWarning, match="Item size *"): + # THEN a warning is raised + logger_cache.add("key1", "very_long_item") + + # THEN the key is not added + assert "key1" not in logger_cache.cache + + +def test_get_existing_key(): + # GIVEN a new instance of LoggerBufferCache with 1024 bytes + logger_cache = LoggerBufferCache(1024) + + # WHEN we add keys + logger_cache.add("key1", "item1") + logger_cache.add("key1", "item2") + + # THEN all items are retrieved + assert logger_cache.get("key1") == ["item1", "item2"] + + +def 
test_get_non_existing_key():
+    # GIVEN a new instance of LoggerBufferCache with 1000 bytes
+    logger_cache = LoggerBufferCache(1000)
+
+    # WHEN getting items for a non-existing key
+    retrieved = logger_cache.get("non_existing")
+
+    # THEN an empty list is returned
+    assert retrieved == []
+
+
+def test_clear_all():
+    # GIVEN a new instance of LoggerBufferCache with 1024 bytes
+    logger_cache = LoggerBufferCache(1024)
+
+    # WHEN we add multiple keys
+    logger_cache.add("key1", "item1")
+    logger_cache.add("key2", "item2")
+
+    # WHEN clearing all keys
+    logger_cache.clear()
+
+    # THEN cache becomes empty
+    assert logger_cache.cache == {}
+    assert logger_cache.current_size == {}
+
+
+def test_clear_specific_key():
+    # GIVEN a new instance of LoggerBufferCache with 1024 bytes
+    logger_cache = LoggerBufferCache(1024)
+
+    # WHEN we add multiple keys
+    logger_cache.add("key1", "item1")
+    logger_cache.add("key2", "item2")
+
+    # WHEN we remove a specific key
+    logger_cache.clear("key1")
+
+    # THEN only that key is removed
+    assert "key1" not in logger_cache.cache
+    assert "key2" in logger_cache.cache
+    assert logger_cache.get("key1") == []
+
+
+def test_multiple_keys_with_size_limits():
+    # GIVEN a new instance of LoggerBufferCache with 20 bytes
+    logger_cache = LoggerBufferCache(20)
+
+    # WHEN adding items to multiple keys
+    logger_cache.add("key1", "item1")
+    logger_cache.add("key1", "item2")
+    logger_cache.add("key2", "long_item")
+
+    # THEN total size remains within limit
+    assert len(logger_cache.cache["key1"]) > 0
+    assert len(logger_cache.cache["key2"]) > 0
+    assert logger_cache.current_size["key1"] + logger_cache.current_size["key2"] <= 20
+
+
+def test_add_different_types():
+    # GIVEN a new instance of LoggerBufferCache with 1024 bytes
+    logger_cache = LoggerBufferCache(1024)
+
+    # WHEN adding items of different types
+    logger_cache.add("key1", 123)
+    logger_cache.add("key1", [1, 2, 3])
+    logger_cache.add("key1", {"a": 1})
+
+    # THEN items are stored successfully
+    retrieved = logger_cache.get("key1")
+    assert len(retrieved) == 3
+
+
+def test_cache_size_tracking():
+    # GIVEN a new instance of LoggerBufferCache with 30 bytes
+    logger_cache = LoggerBufferCache(30)
+
+    # WHEN adding items
+    logger_cache.add("key1", "small")
+    logger_cache.add("key1", "another_item")
+
+    # THEN current size is tracked correctly
+    assert logger_cache.current_size["key1"] == len("small") + len("another_item")
+    assert logger_cache.current_size["key1"] <= 30
diff --git a/tests/unit/logger/test_logger_buffer_config.py b/tests/unit/logger/test_logger_buffer_config.py
new file mode 100644
index 00000000000..dca63d16f3e
--- /dev/null
+++ b/tests/unit/logger/test_logger_buffer_config.py
@@ -0,0 +1,91 @@
+import pytest
+
+from aws_lambda_powertools.logging.buffer import LoggerBufferConfig
+
+
+def test_default_configuration():
+    # GIVEN no specific configuration parameters
+    config_buffer = LoggerBufferConfig()
+
+    # THEN default values are used
+    assert config_buffer.max_size == 10240
+    assert config_buffer.minimum_log_level == "DEBUG"
+    assert config_buffer.flush_on_error is True
+    assert config_buffer.compress is False
+
+
+def test_custom_configuration():
+    # GIVEN a new LoggerBufferConfig with custom configuration parameters
+    config_buffer = LoggerBufferConfig(
+        max_size=20480,
+        minimum_log_level="WARNING",
+        flush_on_error=False,
+        compress=True,
+    )
+
+    # THEN configuration is set with provided values
+    assert config_buffer.max_size == 20480
+    assert config_buffer.minimum_log_level == "WARNING"
+    assert config_buffer.flush_on_error is False
+    assert config_buffer.compress is True
+
+
+def test_invalid_max_size_negative():
+    # GIVEN an invalid negative max size
+    invalid_max_size = -100
+
+    # WHEN creating a LoggerBufferConfig
+    with pytest.raises(ValueError, match="Max size must be a positive integer"):
+        # THEN a ValueError is raised
+        LoggerBufferConfig(max_size=invalid_max_size)
+
+
+def test_invalid_max_size_type():
+    # GIVEN an invalid max size type
+    invalid_max_size = "10240"
+
+    # WHEN creating a LoggerBufferConfig
+    with pytest.raises(ValueError, match="Max size must be a positive integer"):
+        # THEN a ValueError is raised
+        LoggerBufferConfig(max_size=invalid_max_size)
+
+
+def test_invalid_log_level():
+    # GIVEN an invalid log level
+    invalid_log_levels = ["INVALID_LEVEL", 123, None]
+
+    # WHEN creating a LoggerBufferConfig
+    for invalid_log_level in invalid_log_levels:
+        # THEN a ValueError is raised
+        with pytest.raises(ValueError):
+            LoggerBufferConfig(minimum_log_level=invalid_log_level)
+
+
+def test_case_insensitive_log_level():
+    # GIVEN
+    test_cases = ["debug", "Info", "WARNING", "error", "CRITICAL"]
+
+    # WHEN / THEN
+    for log_level in test_cases:
+        config = LoggerBufferConfig(minimum_log_level=log_level)
+        assert config.minimum_log_level == log_level.upper()
+
+
+def test_invalid_flush_on_error():
+    # GIVEN an invalid flush_on_error type
+    invalid_flush_on_error = "True"
+
+    # WHEN creating a LoggerBufferConfig / THEN
+    with pytest.raises(ValueError, match="flush_on_error must be a boolean"):
+        # THEN a ValueError is raised
+        LoggerBufferConfig(flush_on_error=invalid_flush_on_error)
+
+
+def test_invalid_compress():
+    # GIVEN an invalid compress type
+    invalid_compress = "False"
+
+    # WHEN creating a LoggerBufferConfig / THEN
+    with pytest.raises(ValueError, match="compress must be a boolean"):
+        # THEN a ValueError is raised
+        LoggerBufferConfig(compress=invalid_compress)
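Taken together, the two classes this first patch introduces compose naturally: the config object validates and carries the buffering policy, while the cache enforces a byte budget per key. A minimal usage sketch, assuming only the behavior the unit tests above exercise (note that PATCH 02, next, narrows the package re-export to LoggerBufferConfig only, so the cache import later moves to aws_lambda_powertools.logging.buffer.cache):

    from aws_lambda_powertools.logging.buffer import LoggerBufferCache, LoggerBufferConfig

    # Policy object: validates inputs eagerly, then exposes read-only properties
    config = LoggerBufferConfig(max_size=1024, minimum_log_level="WARNING")

    # Byte-budgeted store: one FIFO deque per key, item size measured as len(str(item))
    cache = LoggerBufferCache(max_size_bytes=config.max_size)
    cache.add("trace-1", "first buffered line")
    cache.add("trace-1", "second buffered line")  # oldest entries are evicted once the budget is hit

    assert cache.get("trace-1") == ["first buffered line", "second buffered line"]
    cache.clear("trace-1")  # clear one key; cache.clear() drops every key
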
From 6c4ef9c8537140ba75de0a52b96e48af44376359 Mon Sep 17 00:00:00 2001
From: Leandro Damascena 
Date: Sun, 9 Feb 2025 23:11:57 +0000
Subject: [PATCH 02/40] First commit - Adding Buffer and Config classes

---
 aws_lambda_powertools/logging/buffer/__init__.py | 3 +--
 tests/unit/logger/test_logger_buffer_cache.py    | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/aws_lambda_powertools/logging/buffer/__init__.py b/aws_lambda_powertools/logging/buffer/__init__.py
index 912a3ca41a7..0e7a8cce6bd 100644
--- a/aws_lambda_powertools/logging/buffer/__init__.py
+++ b/aws_lambda_powertools/logging/buffer/__init__.py
@@ -1,4 +1,3 @@
-from aws_lambda_powertools.logging.buffer.cache import LoggerBufferCache
 from aws_lambda_powertools.logging.buffer.config import LoggerBufferConfig
 
-__all__ = ["LoggerBufferCache", "LoggerBufferConfig"]
+__all__ = ["LoggerBufferConfig"]
diff --git a/tests/unit/logger/test_logger_buffer_cache.py b/tests/unit/logger/test_logger_buffer_cache.py
index 8682acd67aa..8bf357d2bb1 100644
--- a/tests/unit/logger/test_logger_buffer_cache.py
+++ b/tests/unit/logger/test_logger_buffer_cache.py
@@ -1,6 +1,6 @@
 import pytest
 
-from aws_lambda_powertools.logging.buffer import LoggerBufferCache
+from aws_lambda_powertools.logging.buffer.cache import LoggerBufferCache
 from aws_lambda_powertools.warnings import PowertoolsUserWarning
 
 
From e2dd0c4d32274a4385d2fd343127292e4f052458 Mon Sep 17 00:00:00 2001
From: Leandro Damascena 
Date: Sun, 9 Feb 2025 23:14:16 +0000
Subject: [PATCH 03/40] First commit - Adding Buffer and Config classes

---
 aws_lambda_powertools/logging/buffer/config.py | 5 
+---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/aws_lambda_powertools/logging/buffer/config.py b/aws_lambda_powertools/logging/buffer/config.py index 0890ddee20f..1dabd4894f4 100644 --- a/aws_lambda_powertools/logging/buffer/config.py +++ b/aws_lambda_powertools/logging/buffer/config.py @@ -8,16 +8,13 @@ class LoggerBufferConfig: Configuration for log buffering behavior. """ - # Define a type alias for valid log levels - LogLevelType = Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] - # Define class-level constant for valid log levels VALID_LOG_LEVELS: list[str] = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] def __init__( self, max_size: int = 10240, - minimum_log_level: LogLevelType = "DEBUG", + minimum_log_level: Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] = "DEBUG", flush_on_error: bool = True, compress: bool = False, ): From 511f1473ee804c3192ce3893ad93828815b03959 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Sun, 9 Feb 2025 23:16:21 +0000 Subject: [PATCH 04/40] First commit - Adding nox tests --- noxfile.py | 1 + tests/unit/logger/required_dependencies/__init__.py | 0 .../{ => required_dependencies}/test_logger_buffer_cache.py | 0 .../{ => required_dependencies}/test_logger_buffer_config.py | 0 4 files changed, 1 insertion(+) create mode 100644 tests/unit/logger/required_dependencies/__init__.py rename tests/unit/logger/{ => required_dependencies}/test_logger_buffer_cache.py (100%) rename tests/unit/logger/{ => required_dependencies}/test_logger_buffer_config.py (100%) diff --git a/noxfile.py b/noxfile.py index 7b73fd0dc59..770d147ecee 100644 --- a/noxfile.py +++ b/noxfile.py @@ -60,6 +60,7 @@ def test_with_only_required_packages(session: nox.Session): session, folders=[ f"{PREFIX_TESTS_FUNCTIONAL}/logger/required_dependencies/", + f"{PREFIX_TESTS_UNIT}/logger/required_dependencies/", f"{PREFIX_TESTS_FUNCTIONAL}/metrics/required_dependencies/", f"{PREFIX_TESTS_FUNCTIONAL}/middleware_factory/required_dependencies/", f"{PREFIX_TESTS_FUNCTIONAL}/typing/required_dependencies/", diff --git a/tests/unit/logger/required_dependencies/__init__.py b/tests/unit/logger/required_dependencies/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/unit/logger/test_logger_buffer_cache.py b/tests/unit/logger/required_dependencies/test_logger_buffer_cache.py similarity index 100% rename from tests/unit/logger/test_logger_buffer_cache.py rename to tests/unit/logger/required_dependencies/test_logger_buffer_cache.py diff --git a/tests/unit/logger/test_logger_buffer_config.py b/tests/unit/logger/required_dependencies/test_logger_buffer_config.py similarity index 100% rename from tests/unit/logger/test_logger_buffer_config.py rename to tests/unit/logger/required_dependencies/test_logger_buffer_config.py From d24a25896895e11608aba5bdc70171603f34687e Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Sun, 9 Feb 2025 23:37:39 +0000 Subject: [PATCH 05/40] Adding new parameter to the constructor --- aws_lambda_powertools/logging/buffer/config.py | 7 +++++-- aws_lambda_powertools/logging/logger.py | 13 +++++++++++-- aws_lambda_powertools/logging/types.py | 4 +++- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/aws_lambda_powertools/logging/buffer/config.py b/aws_lambda_powertools/logging/buffer/config.py index 1dabd4894f4..3f99c0e5d0c 100644 --- a/aws_lambda_powertools/logging/buffer/config.py +++ b/aws_lambda_powertools/logging/buffer/config.py @@ -1,6 +1,9 @@ from __future__ import annotations -from typing 
import Literal +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from aws_lambda_powertools.logging.types import LOG_LEVEL_VALUES class LoggerBufferConfig: @@ -14,7 +17,7 @@ class LoggerBufferConfig: def __init__( self, max_size: int = 10240, - minimum_log_level: Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] = "DEBUG", + minimum_log_level: LOG_LEVEL_VALUES = "DEBUG", flush_on_error: bool = True, compress: bool = False, ): diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 840d068d438..7b8872eb8fa 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -16,6 +16,7 @@ from contextlib import contextmanager from typing import IO, TYPE_CHECKING, Any, Callable, Generator, Iterable, Mapping, TypeVar, overload +from aws_lambda_powertools.logging.buffer.cache import LoggerBufferCache from aws_lambda_powertools.logging.constants import ( LOGGER_ATTRIBUTE_PRECONFIGURED, ) @@ -36,8 +37,11 @@ from aws_lambda_powertools.utilities import jmespath_utils if TYPE_CHECKING: + from aws_lambda_powertools.logging.buffer.config import LoggerBufferConfig + from aws_lambda_powertools.logging.types import LOG_LEVEL_VALUES from aws_lambda_powertools.shared.types import AnyCallableT + logger = logging.getLogger(__name__) is_cold_start = True @@ -82,7 +86,7 @@ class Logger: ---------- service : str, optional service name to be appended in logs, by default "service_undefined" - level : str, int optional + level : Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], int optional The level to set. Can be a string representing the level name: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL' or an integer representing the level value: 10 for 'DEBUG', 20 for 'INFO', 30 for 'WARNING', 40 for 'ERROR', 50 for 'CRITICAL'. 
by default "INFO" @@ -200,7 +204,7 @@ class Logger: def __init__( self, service: str | None = None, - level: str | int | None = None, + level: LOG_LEVEL_VALUES | int | None = None, child: bool = False, sampling_rate: float | None = None, stream: IO[str] | None = None, @@ -216,6 +220,7 @@ def __init__( utc: bool = False, use_rfc3339: bool = False, serialize_stacktrace: bool = True, + logger_buffer: LoggerBufferConfig | None = None, **kwargs, ) -> None: self.service = resolve_env_var_choice( @@ -252,6 +257,10 @@ def __init__( "serialize_stacktrace": serialize_stacktrace, } + self._logger_buffer = logger_buffer + if self._logger_buffer: + self._buffer_cache = LoggerBufferCache(max_size_bytes=self._logger_buffer.max_size) + self._init_logger(formatter_options=formatter_options, log_level=level, **kwargs) if self.log_uncaught_exceptions: diff --git a/aws_lambda_powertools/logging/types.py b/aws_lambda_powertools/logging/types.py index 25f094bc755..7d298dd7412 100644 --- a/aws_lambda_powertools/logging/types.py +++ b/aws_lambda_powertools/logging/types.py @@ -1,10 +1,12 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Dict, TypedDict, Union +from typing import TYPE_CHECKING, Any, Dict, Literal, TypedDict, Union if TYPE_CHECKING: from typing_extensions import NotRequired, TypeAlias +LOG_LEVEL_VALUES = Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] + class PowertoolsLogRecord(TypedDict): # Base fields (required) From 0893e14cb35745be93f27ff19dbd99ac8415f068 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Mon, 10 Feb 2025 16:00:27 +0000 Subject: [PATCH 06/40] Starting tests --- aws_lambda_powertools/logging/logger.py | 36 +++++-- .../test_logger_powertools_buffer.py | 96 +++++++++++++++++++ .../performance/test_zlib_shared_functions.py | 0 3 files changed, 124 insertions(+), 8 deletions(-) create mode 100644 tests/functional/logger/required_dependencies/test_logger_powertools_buffer.py create mode 100644 tests/performance/test_zlib_shared_functions.py diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 7b8872eb8fa..2e401f2e29d 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -12,6 +12,7 @@ import os import random import sys +import time import warnings from contextlib import contextmanager from typing import IO, TYPE_CHECKING, Any, Callable, Generator, Iterable, Mapping, TypeVar, overload @@ -457,6 +458,12 @@ def decorate(event, context, *args, **kwargs): return decorate + def _add_log_to_buffer(self, level, msg, filename, line, function, **kwargs): + if self._buffer_cache: + return False + + return True + def info( self, msg: object, @@ -470,14 +477,27 @@ def info( extra = extra or {} extra = {**extra, **kwargs} - return self._logger.info( - msg, - *args, - exc_info=exc_info, - stack_info=stack_info, - stacklevel=stacklevel, - extra=extra, - ) + if self._logger_buffer: + caller_frame = inspect.stack()[1] + record = { + "level": "INFO", + "msg": msg % args if args else msg, + "filename": caller_frame.filename, + "line": caller_frame.lineno, + "function": caller_frame.function, + "extra_kwargs": kwargs, + "timestamp": time.time(), + } + return self._add_log_to_buffer(**record) + else: + return self._logger.info( + msg, + *args, + exc_info=exc_info, + stack_info=stack_info, + stacklevel=stacklevel, + extra=extra, + ) def error( self, diff --git a/tests/functional/logger/required_dependencies/test_logger_powertools_buffer.py 
b/tests/functional/logger/required_dependencies/test_logger_powertools_buffer.py new file mode 100644 index 00000000000..d8332cb6d8d --- /dev/null +++ b/tests/functional/logger/required_dependencies/test_logger_powertools_buffer.py @@ -0,0 +1,96 @@ +"""aws_lambda_logging tests.""" + +import io +import json +import random +import string +from collections import namedtuple + +import pytest + +from aws_lambda_powertools import Logger +from aws_lambda_powertools.logging.buffer import LoggerBufferConfig + + +@pytest.fixture +def lambda_context(): + lambda_context = { + "function_name": "test", + "memory_limit_in_mb": 128, + "invoked_function_arn": "arn:aws:lambda:eu-west-1:809313241:function:test", + "aws_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72", + } + + return namedtuple("LambdaContext", lambda_context.keys())(*lambda_context.values()) + + +def check_log_dict(log_dict): + assert "timestamp" in log_dict + assert "level" in log_dict + assert "location" in log_dict + assert "message" in log_dict + + +@pytest.fixture +def stdout(): + return io.StringIO() + + +@pytest.fixture +def service_name(): + chars = string.ascii_letters + string.digits + return "".join(random.SystemRandom().choice(chars) for _ in range(15)) + + +def capture_logging_output(stdout): + return [json.loads(d.strip()) for d in stdout.getvalue().strip().split("\n")] + + +def test_logger_buffer_is_enabled(): + + buffer_config = LoggerBufferConfig(max_size=10240) + logger = Logger(level="INFO", buffer_config=buffer_config) + + leo = logger.info("A") + + assert leo is True + + +@pytest.mark.parametrize("level", ["DEBUG", "WARNING", "ERROR", "INFO", "CRITICAL"]) +def test_setup_with_valid_log_levels(stdout, level, service_name): + logger = Logger(service=service_name, level=level, stream=stdout, request_id="request id!", another="value") + msg = "This is a test" + log_command = { + "INFO": logger.info, + "ERROR": logger.error, + "WARNING": logger.warning, + "DEBUG": logger.debug, + "CRITICAL": logger.critical, + } + + log_message = log_command[level] + log_message(msg) + + log_dict = json.loads(stdout.getvalue().strip()) + + check_log_dict(log_dict) + + assert level == log_dict["level"] + assert "This is a test" == log_dict["message"] + assert "request id!" 
== log_dict["request_id"] + assert "exception" not in log_dict + + +def test_logging_exception_traceback(stdout, service_name): + logger = Logger(service=service_name, level="DEBUG", stream=stdout) + + try: + raise ValueError("Boom") + except ValueError: + logger.exception("A value error occurred") + + log_dict = json.loads(stdout.getvalue()) + + check_log_dict(log_dict) + assert "ERROR" == log_dict["level"] + assert "exception" in log_dict diff --git a/tests/performance/test_zlib_shared_functions.py b/tests/performance/test_zlib_shared_functions.py new file mode 100644 index 00000000000..e69de29bb2d From 505eb984355bf9ab9a24898bd0e0ea9fdcd9016a Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 13 Feb 2025 17:30:44 +0000 Subject: [PATCH 07/40] Adding initial logic + test --- .../logging/buffer/config.py | 2 +- .../logging/buffer/functions.py | 42 +++++++ aws_lambda_powertools/logging/logger.py | 113 +++++++++++------- .../test_logger_powertools_buffer.py | 96 --------------- .../test_powertools_logger_buffer.py | 94 +++++++++++++++ .../test_logger_buffer_config.py | 2 +- 6 files changed, 207 insertions(+), 142 deletions(-) create mode 100644 aws_lambda_powertools/logging/buffer/functions.py delete mode 100644 tests/functional/logger/required_dependencies/test_logger_powertools_buffer.py create mode 100644 tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py diff --git a/aws_lambda_powertools/logging/buffer/config.py b/aws_lambda_powertools/logging/buffer/config.py index 3f99c0e5d0c..ffd9dfdceb4 100644 --- a/aws_lambda_powertools/logging/buffer/config.py +++ b/aws_lambda_powertools/logging/buffer/config.py @@ -12,7 +12,7 @@ class LoggerBufferConfig: """ # Define class-level constant for valid log levels - VALID_LOG_LEVELS: list[str] = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] + VALID_LOG_LEVELS: list[str] = ["DEBUG", "INFO", "WARNING"] def __init__( self, diff --git a/aws_lambda_powertools/logging/buffer/functions.py b/aws_lambda_powertools/logging/buffer/functions.py new file mode 100644 index 00000000000..c051c0b78b6 --- /dev/null +++ b/aws_lambda_powertools/logging/buffer/functions.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +import inspect +import time +from typing import Any + + +def _create_buffer_record(level: str, msg: object, args: object, **kwargs) -> dict[str, Any]: + caller_frame = inspect.stack()[2] + timestamp = time.time() + + return { + "level": level.upper(), + "msg": msg, + "args": args, + "filename": caller_frame.filename, + "line": caller_frame.lineno, + "function": caller_frame.function, + "extra_kwargs": kwargs or {}, + "timestamp": timestamp, + } + + +def _resolve_buffer_log_level(buffer_log_level, current_log_level): + # Define log level mapping + log_levels = { + "DEBUG": 10, + "INFO": 20, + "WARNING": 30, + "ERROR": 40, + "CRITICAL": 50, + } + + # Convert string levels to numeric if needed + buffer_level_num = log_levels.get(buffer_log_level.upper()) + current_level_num = log_levels.get(current_log_level.upper()) + + # Compare numeric levels + if buffer_level_num < current_level_num: + return True + + return False diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 2e401f2e29d..dac72ba224b 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -12,12 +12,12 @@ import os import random import sys -import time import warnings from contextlib import contextmanager from typing import IO, TYPE_CHECKING, Any, Callable, 
Generator, Iterable, Mapping, TypeVar, overload from aws_lambda_powertools.logging.buffer.cache import LoggerBufferCache +from aws_lambda_powertools.logging.buffer.functions import _create_buffer_record, _resolve_buffer_log_level from aws_lambda_powertools.logging.constants import ( LOGGER_ATTRIBUTE_PRECONFIGURED, ) @@ -459,12 +459,12 @@ def decorate(event, context, *args, **kwargs): return decorate def _add_log_to_buffer(self, level, msg, filename, line, function, **kwargs): - if self._buffer_cache: - return False + # Initial implementation, will always cache + self._buffer_cache.add("XRAY_ID", msg) return True - def info( + def debug( self, msg: object, *args: object, @@ -477,20 +477,9 @@ def info( extra = extra or {} extra = {**extra, **kwargs} - if self._logger_buffer: - caller_frame = inspect.stack()[1] - record = { - "level": "INFO", - "msg": msg % args if args else msg, - "filename": caller_frame.filename, - "line": caller_frame.lineno, - "function": caller_frame.function, - "extra_kwargs": kwargs, - "timestamp": time.time(), - } - return self._add_log_to_buffer(**record) - else: - return self._logger.info( + # Buffer is not active, flushing + if not self._logger_buffer: + return self._logger.debug( msg, *args, exc_info=exc_info, @@ -499,7 +488,11 @@ def info( extra=extra, ) - def error( + log_record = _create_buffer_record(level="DEBUG", msg=msg, args=args, **kwargs) + + self._add_log_to_buffer(**log_record) + + def info( self, msg: object, *args: object, @@ -512,20 +505,36 @@ def error( extra = extra or {} extra = {**extra, **kwargs} - return self._logger.error( - msg, - *args, - exc_info=exc_info, - stack_info=stack_info, - stacklevel=stacklevel, - extra=extra, - ) + # Buffer is not active, flushing + if not self._logger_buffer: + return self._logger.info( + msg, + *args, + exc_info=exc_info, + stack_info=stack_info, + stacklevel=stacklevel, + extra=extra, + ) - def exception( + if _resolve_buffer_log_level(self._logger_buffer.minimum_log_level, "INFO"): + return self._logger.info( + msg, + *args, + exc_info=exc_info, + stack_info=stack_info, + stacklevel=stacklevel, + extra=extra, + ) + + log_record: dict[str, Any] = _create_buffer_record(level="INFO", msg=msg, args=args, **kwargs) + + self._add_log_to_buffer(**log_record) + + def warning( self, msg: object, *args: object, - exc_info: logging._ExcInfoType = True, + exc_info: logging._ExcInfoType = None, stack_info: bool = False, stacklevel: int = 2, extra: Mapping[str, object] | None = None, @@ -534,16 +543,32 @@ def exception( extra = extra or {} extra = {**extra, **kwargs} - return self._logger.exception( - msg, - *args, - exc_info=exc_info, - stack_info=stack_info, - stacklevel=stacklevel, - extra=extra, - ) + # Buffer is not active, flushing + if not self._logger_buffer: + return self._logger.warning( + msg, + *args, + exc_info=exc_info, + stack_info=stack_info, + stacklevel=stacklevel, + extra=extra, + ) - def critical( + if _resolve_buffer_log_level(self._logger_buffer.minimum_log_level, "WARNING"): + return self._logger.warning( + msg, + *args, + exc_info=exc_info, + stack_info=stack_info, + stacklevel=stacklevel, + extra=extra, + ) + + log_record = _create_buffer_record(level="WARNING", msg=msg, args=args, **kwargs) + + self._add_log_to_buffer(**log_record) + + def error( self, msg: object, *args: object, @@ -556,7 +581,7 @@ def critical( extra = extra or {} extra = {**extra, **kwargs} - return self._logger.critical( + return self._logger.error( msg, *args, exc_info=exc_info, @@ -565,7 +590,7 @@ def critical( 
extra=extra, ) - def warning( + def critical( self, msg: object, *args: object, @@ -578,7 +603,7 @@ def warning( extra = extra or {} extra = {**extra, **kwargs} - return self._logger.warning( + return self._logger.critical( msg, *args, exc_info=exc_info, @@ -587,11 +612,11 @@ def warning( extra=extra, ) - def debug( + def exception( self, msg: object, *args: object, - exc_info: logging._ExcInfoType = None, + exc_info: logging._ExcInfoType = True, stack_info: bool = False, stacklevel: int = 2, extra: Mapping[str, object] | None = None, @@ -600,7 +625,7 @@ def debug( extra = extra or {} extra = {**extra, **kwargs} - return self._logger.debug( + return self._logger.exception( msg, *args, exc_info=exc_info, diff --git a/tests/functional/logger/required_dependencies/test_logger_powertools_buffer.py b/tests/functional/logger/required_dependencies/test_logger_powertools_buffer.py deleted file mode 100644 index d8332cb6d8d..00000000000 --- a/tests/functional/logger/required_dependencies/test_logger_powertools_buffer.py +++ /dev/null @@ -1,96 +0,0 @@ -"""aws_lambda_logging tests.""" - -import io -import json -import random -import string -from collections import namedtuple - -import pytest - -from aws_lambda_powertools import Logger -from aws_lambda_powertools.logging.buffer import LoggerBufferConfig - - -@pytest.fixture -def lambda_context(): - lambda_context = { - "function_name": "test", - "memory_limit_in_mb": 128, - "invoked_function_arn": "arn:aws:lambda:eu-west-1:809313241:function:test", - "aws_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72", - } - - return namedtuple("LambdaContext", lambda_context.keys())(*lambda_context.values()) - - -def check_log_dict(log_dict): - assert "timestamp" in log_dict - assert "level" in log_dict - assert "location" in log_dict - assert "message" in log_dict - - -@pytest.fixture -def stdout(): - return io.StringIO() - - -@pytest.fixture -def service_name(): - chars = string.ascii_letters + string.digits - return "".join(random.SystemRandom().choice(chars) for _ in range(15)) - - -def capture_logging_output(stdout): - return [json.loads(d.strip()) for d in stdout.getvalue().strip().split("\n")] - - -def test_logger_buffer_is_enabled(): - - buffer_config = LoggerBufferConfig(max_size=10240) - logger = Logger(level="INFO", buffer_config=buffer_config) - - leo = logger.info("A") - - assert leo is True - - -@pytest.mark.parametrize("level", ["DEBUG", "WARNING", "ERROR", "INFO", "CRITICAL"]) -def test_setup_with_valid_log_levels(stdout, level, service_name): - logger = Logger(service=service_name, level=level, stream=stdout, request_id="request id!", another="value") - msg = "This is a test" - log_command = { - "INFO": logger.info, - "ERROR": logger.error, - "WARNING": logger.warning, - "DEBUG": logger.debug, - "CRITICAL": logger.critical, - } - - log_message = log_command[level] - log_message(msg) - - log_dict = json.loads(stdout.getvalue().strip()) - - check_log_dict(log_dict) - - assert level == log_dict["level"] - assert "This is a test" == log_dict["message"] - assert "request id!" 
== log_dict["request_id"] - assert "exception" not in log_dict - - -def test_logging_exception_traceback(stdout, service_name): - logger = Logger(service=service_name, level="DEBUG", stream=stdout) - - try: - raise ValueError("Boom") - except ValueError: - logger.exception("A value error occurred") - - log_dict = json.loads(stdout.getvalue()) - - check_log_dict(log_dict) - assert "ERROR" == log_dict["level"] - assert "exception" in log_dict diff --git a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py new file mode 100644 index 00000000000..01e71047196 --- /dev/null +++ b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py @@ -0,0 +1,94 @@ +"""aws_lambda_logging tests.""" + +import io +import json +import random +import string +from collections import namedtuple + +import pytest + +from aws_lambda_powertools import Logger +from aws_lambda_powertools.logging.buffer import LoggerBufferConfig + + +@pytest.fixture +def lambda_context(): + lambda_context = { + "function_name": "test", + "memory_limit_in_mb": 128, + "invoked_function_arn": "arn:aws:lambda:eu-west-1:809313241:function:test", + "aws_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72", + } + + return namedtuple("LambdaContext", lambda_context.keys())(*lambda_context.values()) + + +@pytest.fixture +def stdout(): + return io.StringIO() + + +@pytest.fixture +def service_name(): + chars = string.ascii_letters + string.digits + return "".join(random.SystemRandom().choice(chars) for _ in range(15)) + + +def capture_logging_output(stdout): + return json.loads(stdout.getvalue().strip()) + + +def capture_multiple_logging_statements_output(stdout): + return [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line] + + +@pytest.mark.parametrize("log_level", ["DEBUG", "WARNING", "INFO"]) +def test_logger_buffer_with_minimum_level_warning(log_level, stdout, service_name): + # GIVEN a configured logger with buffer enabled and specific minimum log level + logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="WARNING") + logger = Logger(level=log_level, service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + + msg = "This is a test" + log_command = { + "INFO": logger.info, + "WARNING": logger.warning, + "DEBUG": logger.debug, + } + + # WHEN a log message is sent using the corresponding log method + log_message = log_command[log_level] + log_message(msg) + log_dict = stdout.getvalue() + + # THEN verify that the message is buffered and not immediately output + assert log_dict == "" + + +def test_logger_buffer_is_never_buffered_with_exception(stdout, service_name): + # GIVEN: A logger configured with buffer + logger_buffer_config = LoggerBufferConfig(max_size=10240) + logger = Logger(service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + + # WHEN: An exception is raised and logged + try: + raise ValueError("something went wrong") + except Exception: + logger.exception("Received an exception") + + # THEN: We expect the log record is not buffered + log = capture_logging_output(stdout) + assert "Received an exception" == log["message"] + + +def test_logger_buffer_is_never_buffered_with_error_new(stdout, service_name): + # GIVEN: A logger configured with buffer + logger_buffer_config = LoggerBufferConfig(max_size=10240) + logger = Logger(service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + + # WHEN: An exception is 
reported via logger.error, without being raised
+    logger.error("Received an exception")
+
+    # THEN: We expect the log record is not buffered
+    log = capture_logging_output(stdout)
+    assert "Received an exception" == log["message"]
diff --git a/tests/unit/logger/required_dependencies/test_logger_buffer_config.py b/tests/unit/logger/required_dependencies/test_logger_buffer_config.py
index dca63d16f3e..7a107b7d008 100644
--- a/tests/unit/logger/required_dependencies/test_logger_buffer_config.py
+++ b/tests/unit/logger/required_dependencies/test_logger_buffer_config.py
@@ -63,7 +63,7 @@ def test_invalid_log_level():
 
 def test_case_insensitive_log_level():
     # GIVEN
-    test_cases = ["debug", "Info", "WARNING", "error", "CRITICAL"]
+    test_cases = ["debug", "Info", "WARNING"]
 
     # WHEN / THEN
     for log_level in test_cases:
From dbfd70856853d7cb58eecfbe8afcb8cdae846917 Mon Sep 17 00:00:00 2001
From: Leandro Damascena 
Date: Thu, 13 Feb 2025 17:33:49 +0000
Subject: [PATCH 08/40] Adding initial logic + test

---
 aws_lambda_powertools/logging/buffer/config.py | 4 ++--
 aws_lambda_powertools/logging/logger.py        | 3 +--
 aws_lambda_powertools/logging/types.py         | 2 +-
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/aws_lambda_powertools/logging/buffer/config.py b/aws_lambda_powertools/logging/buffer/config.py
index ffd9dfdceb4..9d14fe2375e 100644
--- a/aws_lambda_powertools/logging/buffer/config.py
+++ b/aws_lambda_powertools/logging/buffer/config.py
@@ -3,7 +3,7 @@
 from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
-    from aws_lambda_powertools.logging.types import LOG_LEVEL_VALUES
+    from aws_lambda_powertools.logging.types import LOG_LEVEL_BUFFER_VALUES
 
 
 class LoggerBufferConfig:
@@ -17,7 +17,7 @@ class LoggerBufferConfig:
     def __init__(
         self,
         max_size: int = 10240,
-        minimum_log_level: LOG_LEVEL_VALUES = "DEBUG",
+        minimum_log_level: LOG_LEVEL_BUFFER_VALUES = "DEBUG",
         flush_on_error: bool = True,
         compress: bool = False,
     ):
diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py
index dac72ba224b..92c80126e9a 100644
--- a/aws_lambda_powertools/logging/logger.py
+++ b/aws_lambda_powertools/logging/logger.py
@@ -39,7 +39,6 @@
 if TYPE_CHECKING:
     from aws_lambda_powertools.logging.buffer.config import LoggerBufferConfig
-    from aws_lambda_powertools.logging.types import LOG_LEVEL_VALUES
     from aws_lambda_powertools.shared.types import AnyCallableT
 
 
@@ -205,7 +204,7 @@ class Logger:
     def __init__(
         self,
         service: str | None = None,
-        level: LOG_LEVEL_VALUES | int | None = None,
+        level: str | int | None = None,
         child: bool = False,
         sampling_rate: float | None = None,
         stream: IO[str] | None = None,
diff --git a/aws_lambda_powertools/logging/types.py b/aws_lambda_powertools/logging/types.py
index 7d298dd7412..4d8ff853e0e 100644
--- a/aws_lambda_powertools/logging/types.py
+++ b/aws_lambda_powertools/logging/types.py
@@ -5,7 +5,7 @@
 if TYPE_CHECKING:
     from typing_extensions import NotRequired, TypeAlias
 
-LOG_LEVEL_VALUES = Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
+LOG_LEVEL_BUFFER_VALUES = Literal["DEBUG", "INFO", "WARNING"]
 
 
 class PowertoolsLogRecord(TypedDict):
From 93f4a1352e33416b29ce9a5dd70b201d47274a7d Mon Sep 17 00:00:00 2001
From: Leandro Damascena 
Date: Thu, 13 Feb 2025 18:10:43 +0000
Subject: [PATCH 09/40] Adding initial logic + test

---
 aws_lambda_powertools/logging/logger.py       | 28 ++++++++++-
 .../test_powertools_logger_buffer.py          | 48 +++++++++++++++++++
 2 files changed, 74 insertions(+), 2 deletions(-)

diff --git a/aws_lambda_powertools/logging/logger.py 
b/aws_lambda_powertools/logging/logger.py index 92c80126e9a..161b2c747bb 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -463,6 +463,10 @@ def _add_log_to_buffer(self, level, msg, filename, line, function, **kwargs): return True + def flush_buffer(self): + ## INITIAL IMPLEMENTATION + self._logger.debug("Buffer was flushed") + def debug( self, msg: object, @@ -504,7 +508,7 @@ def info( extra = extra or {} extra = {**extra, **kwargs} - # Buffer is not active, flushing + # Buffer is not active and we need to flush if not self._logger_buffer: return self._logger.info( msg, @@ -515,6 +519,7 @@ def info( extra=extra, ) + # Buffer log level is higher than this log level and we need to flush if _resolve_buffer_log_level(self._logger_buffer.minimum_log_level, "INFO"): return self._logger.info( msg, @@ -542,7 +547,7 @@ def warning( extra = extra or {} extra = {**extra, **kwargs} - # Buffer is not active, flushing + # Buffer is not active and we need to flush if not self._logger_buffer: return self._logger.warning( msg, @@ -553,6 +558,7 @@ def warning( extra=extra, ) + # Buffer log level is higher than this log level and we need to flush if _resolve_buffer_log_level(self._logger_buffer.minimum_log_level, "WARNING"): return self._logger.warning( msg, @@ -580,6 +586,12 @@ def error( extra = extra or {} extra = {**extra, **kwargs} + # Buffer is active and an error happened + # LoggerBufferConfig flush_on_error is True + # So, we need to flush the buffer + if self._logger_buffer and self._logger_buffer.flush_on_error: + self.flush_buffer() + return self._logger.error( msg, *args, @@ -602,6 +614,12 @@ def critical( extra = extra or {} extra = {**extra, **kwargs} + # Buffer is active and an error happened + # LoggerBufferConfig flush_on_error is True + # So, we need to flush the buffer + if self._logger_buffer and self._logger_buffer.flush_on_error: + self.flush_buffer() + return self._logger.critical( msg, *args, @@ -624,6 +642,12 @@ def exception( extra = extra or {} extra = {**extra, **kwargs} + # Buffer is active and an error happened + # LoggerBufferConfig flush_on_error is True + # So, we need to flush the buffer + if self._logger_buffer and self._logger_buffer.flush_on_error: + self.flush_buffer() + return self._logger.exception( msg, *args, diff --git a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py index 01e71047196..293f2c0347b 100644 --- a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py +++ b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py @@ -92,3 +92,51 @@ def test_logger_buffer_is_never_buffered_with_error_new(stdout, service_name): # THEN: We expect the log record is not buffered log = capture_logging_output(stdout) assert "Received an exception" == log["message"] + + +@pytest.mark.parametrize("log_level", ["CRITICAL", "ERROR"]) +def test_logger_buffer_is_flushed_when_an_error_happens(stdout, service_name, log_level): + # GIVEN: A logger configured with buffer + logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=True) + logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + + logger.debug("this log line will be flushed") + logger.debug("this log line will be flushed too") + + log_command = { + "CRITICAL": logger.critical, + "ERROR": logger.error, + "EXCEPTION": 
logger.exception,
+    }
+
+    # WHEN a log message is sent using the corresponding log method
+    log_message = log_command[log_level]
+    log_message("Received an exception")
+
+    # THEN: We expect the buffer to be flushed
+    log = capture_multiple_logging_statements_output(stdout)
+    assert "Buffer was flushed" == log[0]["message"]
+
+
+@pytest.mark.parametrize("log_level", ["CRITICAL", "ERROR"])
+def test_logger_buffer_is_not_flushed_when_an_error_happens(stdout, service_name, log_level):
+    # GIVEN: A logger configured with buffer
+    logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=False)
+    logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config)
+
+    logger.debug("this log line will be flushed")
+    logger.debug("this log line will be flushed too")
+
+    log_command = {
+        "CRITICAL": logger.critical,
+        "ERROR": logger.error,
+        "EXCEPTION": logger.exception,
+    }
+
+    # WHEN a log message is sent using the corresponding log method
+    log_message = log_command[log_level]
+    log_message("Received an exception")
+
+    # THEN: We expect only the error log to be emitted, without flushing the buffer
+    log = capture_logging_output(stdout)
+    assert "Received an exception" == log["message"]
From 35acff51b9abc6f7b4dd033a92c26facc1ec0d46 Mon Sep 17 00:00:00 2001
From: Leandro Damascena 
Date: Thu, 13 Feb 2025 18:32:22 +0000
Subject: [PATCH 10/40] Unit tests for log comparison

---
 .../logging/buffer/functions.py               |  2 +-
 .../test_logger_buffer_functions.py           | 36 +++++++++++++++++++
 2 files changed, 37 insertions(+), 1 deletion(-)
 create mode 100644 tests/unit/logger/required_dependencies/test_logger_buffer_functions.py

diff --git a/aws_lambda_powertools/logging/buffer/functions.py b/aws_lambda_powertools/logging/buffer/functions.py
index c051c0b78b6..d3ce8ea0c11 100644
--- a/aws_lambda_powertools/logging/buffer/functions.py
+++ b/aws_lambda_powertools/logging/buffer/functions.py
@@ -36,7 +36,7 @@ def _resolve_buffer_log_level(buffer_log_level, current_log_level):
     current_level_num = log_levels.get(current_log_level.upper())
 
     # Compare numeric levels
-    if buffer_level_num < current_level_num:
+    if buffer_level_num <= current_level_num:
         return True
 
     return False
diff --git a/tests/unit/logger/required_dependencies/test_logger_buffer_functions.py b/tests/unit/logger/required_dependencies/test_logger_buffer_functions.py
new file mode 100644
index 00000000000..d06839233b8
--- /dev/null
+++ b/tests/unit/logger/required_dependencies/test_logger_buffer_functions.py
@@ -0,0 +1,36 @@
+from aws_lambda_powertools.logging.buffer.functions import _resolve_buffer_log_level
+
+
+def test_resolve_buffer_log_level_comparison():
+    # Test cases where buffer level is lower than current level (should return True)
+    assert _resolve_buffer_log_level("DEBUG", "INFO") is True
+    assert _resolve_buffer_log_level("DEBUG", "WARNING") is True
+    assert _resolve_buffer_log_level("DEBUG", "ERROR") is True
+    assert _resolve_buffer_log_level("INFO", "WARNING") is True
+    assert _resolve_buffer_log_level("INFO", "ERROR") is True
+    assert _resolve_buffer_log_level("WARNING", "ERROR") is True
+
+    # Test cases where buffer level is equal to current level (should return True)
+    assert _resolve_buffer_log_level("INFO", "INFO") is True
+    assert _resolve_buffer_log_level("DEBUG", "DEBUG") is True
+    assert _resolve_buffer_log_level("WARNING", "WARNING") is True
+    assert _resolve_buffer_log_level("ERROR", "ERROR") is True
+    assert _resolve_buffer_log_level("CRITICAL", "CRITICAL") is True
+
+    # Test cases where buffer level is higher than current level (should return False)
+    assert _resolve_buffer_log_level("ERROR", "DEBUG") is False
+    assert _resolve_buffer_log_level("CRITICAL", "INFO") is False
+    assert _resolve_buffer_log_level("ERROR", "WARNING") is False
+
+
+def test_resolve_buffer_log_level_case_insensitivity():
+    # Test case insensitivity
+    assert _resolve_buffer_log_level("debug", "INFO") is True
+    assert _resolve_buffer_log_level("DEBUG", "info") is True
+    assert _resolve_buffer_log_level("Debug", "Info") is True
+
+
+def test_resolve_buffer_log_level_edge_cases():
+    # Additional edge cases
+    assert _resolve_buffer_log_level("DEBUG", "CRITICAL") is True
+    assert _resolve_buffer_log_level("CRITICAL", "DEBUG") is False
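PATCH 10 above relaxes the comparison to "<=", and PATCH 11 below immediately reverts it to a strict "<" while renaming the helper to _check_minimum_buffer_log_level. The distinction matters: with the strict comparison, a record logged at exactly the configured minimum level is still buffered, and only strictly more severe records bypass the buffer. A small sketch of the final semantics (the standalone helper name here is illustrative, not part of the library):

    LOG_LEVELS = {"DEBUG": 10, "INFO": 20, "WARNING": 30, "ERROR": 40, "CRITICAL": 50}

    def should_bypass_buffer(buffer_minimum: str, record_level: str) -> bool:
        # Mirrors _check_minimum_buffer_log_level after PATCH 11: strict "<"
        return LOG_LEVELS[buffer_minimum.upper()] < LOG_LEVELS[record_level.upper()]

    assert should_bypass_buffer("WARNING", "ERROR") is True     # more severe: emit immediately
    assert should_bypass_buffer("WARNING", "WARNING") is False  # at the minimum: keep buffering
    assert should_bypass_buffer("WARNING", "DEBUG") is False    # below the minimum: keep buffering
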
From ed3902da1925e0da2f4ec53b6bdf16f0bcc8253e Mon Sep 17 00:00:00 2001
From: Leandro Damascena 
Date: Thu, 13 Feb 2025 19:23:01 +0000
Subject: [PATCH 11/40] Fixing logic to handle log flush

---
 .../logging/buffer/functions.py               |  4 +-
 aws_lambda_powertools/logging/logger.py       | 16 ++++----
 .../test_logger_buffer_functions.py           | 37 ++++++++-----------
 3 files changed, 25 insertions(+), 32 deletions(-)

diff --git a/aws_lambda_powertools/logging/buffer/functions.py b/aws_lambda_powertools/logging/buffer/functions.py
index d3ce8ea0c11..11f4b6604b8 100644
--- a/aws_lambda_powertools/logging/buffer/functions.py
+++ b/aws_lambda_powertools/logging/buffer/functions.py
@@ -21,7 +21,7 @@ def _create_buffer_record(level: str, msg: object, args: object, **kwargs) -> di
     }
 
 
-def _resolve_buffer_log_level(buffer_log_level, current_log_level):
+def _check_minimum_buffer_log_level(buffer_log_level, current_log_level):
     # Define log level mapping
     log_levels = {
         "DEBUG": 10,
@@ -36,7 +36,7 @@ def _resolve_buffer_log_level(buffer_log_level, current_log_level):
     current_level_num = log_levels.get(current_log_level.upper())
 
     # Compare numeric levels
-    if buffer_level_num <= current_level_num:
+    if buffer_level_num < current_level_num:
         return True
 
     return False
diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py
index 161b2c747bb..d269e181abd 100644
--- a/aws_lambda_powertools/logging/logger.py
+++ b/aws_lambda_powertools/logging/logger.py
@@ -17,7 +17,7 @@
 from typing import IO, TYPE_CHECKING, Any, Callable, Generator, Iterable, Mapping, TypeVar, overload
 
 from aws_lambda_powertools.logging.buffer.cache import LoggerBufferCache
-from aws_lambda_powertools.logging.buffer.functions import _create_buffer_record, _resolve_buffer_log_level
+from aws_lambda_powertools.logging.buffer.functions import _check_minimum_buffer_log_level, _create_buffer_record
 from aws_lambda_powertools.logging.constants import (
     LOGGER_ATTRIBUTE_PRECONFIGURED,
 )
@@ -86,7 +86,7 @@ class Logger:
     ----------
     service : str, optional
         service name to be appended in logs, by default "service_undefined"
-    level : Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], int optional
+    level : str, int optional
        The level to set. Can be a string representing the level name: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
        or an integer representing the level value: 10 for 'DEBUG', 20 for 'INFO', 30 for 'WARNING',
        40 for 'ERROR', 50 for 'CRITICAL'. 
by default "INFO" @@ -457,7 +457,7 @@ def decorate(event, context, *args, **kwargs): return decorate - def _add_log_to_buffer(self, level, msg, filename, line, function, **kwargs): + def _add_log_line_to_buffer(self, level, msg, filename, line, function, **kwargs): # Initial implementation, will always cache self._buffer_cache.add("XRAY_ID", msg) @@ -493,7 +493,7 @@ def debug( log_record = _create_buffer_record(level="DEBUG", msg=msg, args=args, **kwargs) - self._add_log_to_buffer(**log_record) + self._add_log_line_to_buffer(**log_record) def info( self, @@ -520,7 +520,7 @@ def info( ) # Buffer log level is higher than this log level and we need to flush - if _resolve_buffer_log_level(self._logger_buffer.minimum_log_level, "INFO"): + if _check_minimum_buffer_log_level(self._logger_buffer.minimum_log_level, "INFO"): return self._logger.info( msg, *args, @@ -532,7 +532,7 @@ def info( log_record: dict[str, Any] = _create_buffer_record(level="INFO", msg=msg, args=args, **kwargs) - self._add_log_to_buffer(**log_record) + self._add_log_line_to_buffer(**log_record) def warning( self, @@ -559,7 +559,7 @@ def warning( ) # Buffer log level is higher than this log level and we need to flush - if _resolve_buffer_log_level(self._logger_buffer.minimum_log_level, "WARNING"): + if _check_minimum_buffer_log_level(self._logger_buffer.minimum_log_level, "WARNING"): return self._logger.warning( msg, *args, @@ -571,7 +571,7 @@ def warning( log_record = _create_buffer_record(level="WARNING", msg=msg, args=args, **kwargs) - self._add_log_to_buffer(**log_record) + self._add_log_line_to_buffer(**log_record) def error( self, diff --git a/tests/unit/logger/required_dependencies/test_logger_buffer_functions.py b/tests/unit/logger/required_dependencies/test_logger_buffer_functions.py index d06839233b8..5a714b095d2 100644 --- a/tests/unit/logger/required_dependencies/test_logger_buffer_functions.py +++ b/tests/unit/logger/required_dependencies/test_logger_buffer_functions.py @@ -1,36 +1,29 @@ -from aws_lambda_powertools.logging.buffer.functions import _resolve_buffer_log_level +from aws_lambda_powertools.logging.buffer.functions import _check_minimum_buffer_log_level def test_resolve_buffer_log_level_comparison(): # Test cases where buffer level is lower than current level (should return True) - assert _resolve_buffer_log_level("DEBUG", "INFO") is True - assert _resolve_buffer_log_level("DEBUG", "WARNING") is True - assert _resolve_buffer_log_level("DEBUG", "ERROR") is True - assert _resolve_buffer_log_level("INFO", "WARNING") is True - assert _resolve_buffer_log_level("INFO", "ERROR") is True - assert _resolve_buffer_log_level("WARNING", "ERROR") is True - - # Test cases where buffer level is equal to current level (should return True) - assert _resolve_buffer_log_level("INFO", "INFO") is True - assert _resolve_buffer_log_level("DEBUG", "DEBUG") is True - assert _resolve_buffer_log_level("WARNING", "WARNING") is True - assert _resolve_buffer_log_level("ERROR", "ERROR") is True - assert _resolve_buffer_log_level("CRITICAL", "CRITICAL") is True + assert _check_minimum_buffer_log_level("DEBUG", "INFO") is True + assert _check_minimum_buffer_log_level("DEBUG", "WARNING") is True + assert _check_minimum_buffer_log_level("DEBUG", "ERROR") is True + assert _check_minimum_buffer_log_level("INFO", "WARNING") is True + assert _check_minimum_buffer_log_level("INFO", "ERROR") is True + assert _check_minimum_buffer_log_level("WARNING", "ERROR") is True # Test cases where buffer level is higher than current level (should return 
False) - assert _resolve_buffer_log_level("ERROR", "DEBUG") is False - assert _resolve_buffer_log_level("CRITICAL", "INFO") is False - assert _resolve_buffer_log_level("ERROR", "WARNING") is False + assert _check_minimum_buffer_log_level("ERROR", "DEBUG") is False + assert _check_minimum_buffer_log_level("CRITICAL", "INFO") is False + assert _check_minimum_buffer_log_level("ERROR", "WARNING") is False def test_resolve_buffer_log_level_case_insensitivity(): # Test case insensitivity - assert _resolve_buffer_log_level("debug", "INFO") is True - assert _resolve_buffer_log_level("DEBUG", "info") is True - assert _resolve_buffer_log_level("Debug", "Info") is True + assert _check_minimum_buffer_log_level("debug", "INFO") is True + assert _check_minimum_buffer_log_level("DEBUG", "info") is True + assert _check_minimum_buffer_log_level("Debug", "Info") is True def test_resolve_buffer_log_level_edge_cases(): # Additional edge cases - assert _resolve_buffer_log_level("DEBUG", "CRITICAL") is True - assert _resolve_buffer_log_level("CRITICAL", "DEBUG") is False + assert _check_minimum_buffer_log_level("DEBUG", "CRITICAL") is True + assert _check_minimum_buffer_log_level("CRITICAL", "DEBUG") is False From e39229aecc9c8cb71d0744dc6818b1f0edc8db69 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 13 Feb 2025 19:40:56 +0000 Subject: [PATCH 12/40] Start flushing logs --- aws_lambda_powertools/logging/logger.py | 23 +++++++++++-------- .../test_powertools_logger_buffer.py | 7 ++++-- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index d269e181abd..465f023c5dd 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -457,15 +457,20 @@ def decorate(event, context, *args, **kwargs): return decorate - def _add_log_line_to_buffer(self, level, msg, filename, line, function, **kwargs): + def _add_log_line_to_buffer(self, log_record: dict[str, Any]): # Initial implementation, will always cache - self._buffer_cache.add("XRAY_ID", msg) - - return True + # Add logic for "empty" + tracer_id = os.getenv(constants.XRAY_TRACE_ID_ENV, None) + if tracer_id: + self._buffer_cache.add(tracer_id, log_record) def flush_buffer(self): - ## INITIAL IMPLEMENTATION - self._logger.debug("Buffer was flushed") + # Initial logic + tracer_id = os.getenv(constants.XRAY_TRACE_ID_ENV, None) + for item in self._buffer_cache.get(tracer_id): + self._logger.debug(item["msg"]) + + self._buffer_cache.clear() def debug( self, @@ -493,7 +498,7 @@ def debug( log_record = _create_buffer_record(level="DEBUG", msg=msg, args=args, **kwargs) - self._add_log_line_to_buffer(**log_record) + self._add_log_line_to_buffer(log_record) def info( self, @@ -532,7 +537,7 @@ def info( log_record: dict[str, Any] = _create_buffer_record(level="INFO", msg=msg, args=args, **kwargs) - self._add_log_line_to_buffer(**log_record) + self._add_log_line_to_buffer(log_record) def warning( self, @@ -571,7 +576,7 @@ def warning( log_record = _create_buffer_record(level="WARNING", msg=msg, args=args, **kwargs) - self._add_log_line_to_buffer(**log_record) + self._add_log_line_to_buffer(log_record) def error( self, diff --git a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py index 293f2c0347b..4386baf2af1 100644 --- a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py +++ 
b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py @@ -10,6 +10,7 @@ from aws_lambda_powertools import Logger from aws_lambda_powertools.logging.buffer import LoggerBufferConfig +from aws_lambda_powertools.shared import constants @pytest.fixture @@ -95,8 +96,9 @@ def test_logger_buffer_is_never_buffered_with_error_new(stdout, service_name): @pytest.mark.parametrize("log_level", ["CRITICAL", "ERROR"]) -def test_logger_buffer_is_flushed_when_an_error_happens(stdout, service_name, log_level): +def test_logger_buffer_is_flushed_when_an_error_happens(stdout, service_name, log_level, monkeypatch): # GIVEN: A logger configured with buffer + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=True) logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) @@ -115,7 +117,8 @@ def test_logger_buffer_is_flushed_when_an_error_happens(stdout, service_name, lo # THEN: We expect the log record is not buffered log = capture_multiple_logging_statements_output(stdout) - assert "Buffer was flushed" == log[0]["message"] + assert "this log line will be flushed" == log[0]["message"] + assert "this log line will be flushed too" == log[1]["message"] @pytest.mark.parametrize("log_level", ["CRITICAL", "ERROR"]) From 27b1ce9396cd119d1a96f93eaa7bea341ecbb3b7 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 13 Feb 2025 23:11:15 +0000 Subject: [PATCH 13/40] Adding more logic --- .../logging/buffer/functions.py | 13 +- aws_lambda_powertools/logging/logger.py | 162 ++++++++++++++---- .../test_powertools_logger_buffer.py | 17 +- 3 files changed, 158 insertions(+), 34 deletions(-) diff --git a/aws_lambda_powertools/logging/buffer/functions.py b/aws_lambda_powertools/logging/buffer/functions.py index 11f4b6604b8..f8b0be4c66d 100644 --- a/aws_lambda_powertools/logging/buffer/functions.py +++ b/aws_lambda_powertools/logging/buffer/functions.py @@ -2,21 +2,26 @@ import inspect import time -from typing import Any +from typing import Any, Mapping -def _create_buffer_record(level: str, msg: object, args: object, **kwargs) -> dict[str, Any]: +def _create_buffer_record( + level: int, + msg: object, + args: object, + extra: Mapping[str, object] | None = None, +) -> dict[str, Any]: caller_frame = inspect.stack()[2] timestamp = time.time() return { - "level": level.upper(), + "level": level, "msg": msg, "args": args, "filename": caller_frame.filename, "line": caller_frame.lineno, "function": caller_frame.function, - "extra_kwargs": kwargs or {}, + "extra": extra, "timestamp": timestamp, } diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 465f023c5dd..27a08d85273 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -457,18 +457,73 @@ def decorate(event, context, *args, **kwargs): return decorate - def _add_log_line_to_buffer(self, log_record: dict[str, Any]): - # Initial implementation, will always cache - # Add logic for "empty" + def _create_and_flush_log_record(self, log_line: dict) -> None: + """ + Create and immediately flush a log record to the configured logger. 
+ + Parameters + ---------- + log_line : dict[str, Any] + Dictionary containing log record details with keys: + - 'level': Logging level + - 'filename': Source filename + - 'line': Line number + - 'msg': Log message + - 'function': Source function name + - 'extra': Additional context + - 'timestamp': Original log creation time + + Notes + ----- + Bypasses standard logging flow by directly creating and handling a log record. + Preserves original timestamp and source information. + """ + record = self._logger.makeRecord( + name=self.name, + level=log_line["level"], + fn=log_line["filename"], + lno=log_line["line"], + msg=log_line["msg"], + args=(), + exc_info=None, + func=log_line["function"], + extra=log_line["extra"], + ) + record.created = log_line["timestamp"] + self._logger.handle(record) + + def _add_log_record_to_buffer( + self, + level: int, + msg: object, + args: object, + exc_info: logging._ExcInfoType, + stack_info: bool, + extra: Mapping[str, object], + ): tracer_id = os.getenv(constants.XRAY_TRACE_ID_ENV, None) if tracer_id: + log_record: dict[str, Any] = _create_buffer_record(level=level, msg=msg, args=args, extra=extra) self._buffer_cache.add(tracer_id, log_record) def flush_buffer(self): - # Initial logic + """ + Flush all buffered log records associated with current trace ID. + + Notes + ----- + Retrieves log records for current trace from buffer + Immediately processes and logs each record + Clears buffer after complete processing + + Raises + ------ + Any exceptions from underlying logging or buffer mechanisms + will be propagated to caller + """ tracer_id = os.getenv(constants.XRAY_TRACE_ID_ENV, None) - for item in self._buffer_cache.get(tracer_id): - self._logger.debug(item["msg"]) + for log_line in self._buffer_cache.get(tracer_id): + self._create_and_flush_log_record(log_line) self._buffer_cache.clear() @@ -485,7 +540,16 @@ def debug( extra = extra or {} extra = {**extra, **kwargs} - # Buffer is not active, flushing + # Logging workflow for logging.debug: + # 1. Buffer is completely disabled - log right away + # 2. DEBUG is the maximum level of buffer, so, can't bypass if enabled + # 3. Store in buffer for potential later processing + + # MAINTAINABILITY_DECISION: + # Keeping this implementation to avoid complex code handling. + # Also for clarity over complexity + + # Buffer is not active and we need to log immediately if not self._logger_buffer: return self._logger.debug( msg, @@ -496,9 +560,15 @@ def debug( extra=extra, ) - log_record = _create_buffer_record(level="DEBUG", msg=msg, args=args, **kwargs) - - self._add_log_line_to_buffer(log_record) + # Store record in the buffer + self._add_log_record_to_buffer( + level=logging.DEBUG, + msg=msg, + args=args, + exc_info=exc_info, + stack_info=stack_info, + extra=extra, + ) def info( self, @@ -513,7 +583,16 @@ def info( extra = extra or {} extra = {**extra, **kwargs} - # Buffer is not active and we need to flush + # Logging workflow for logging.info: + # 1. Buffer is completely disabled - log right away + # 2. Log severity exceeds buffer's minimum threshold - bypass buffering + # 3. If neither condition met, store in buffer for potential later processing + + # MAINTAINABILITY_DECISION: + # Keeping this implementation to avoid complex code handling. 
+ # Also for clarity over complexity + + # Buffer is not active and we need to log immediately if not self._logger_buffer: return self._logger.info( msg, @@ -524,7 +603,7 @@ def info( extra=extra, ) - # Buffer log level is higher than this log level and we need to flush + # Bypass buffer when log severity meets or exceeds configured minimum if _check_minimum_buffer_log_level(self._logger_buffer.minimum_log_level, "INFO"): return self._logger.info( msg, @@ -535,9 +614,15 @@ def info( extra=extra, ) - log_record: dict[str, Any] = _create_buffer_record(level="INFO", msg=msg, args=args, **kwargs) - - self._add_log_line_to_buffer(log_record) + # Store record in the buffer + self._add_log_record_to_buffer( + level=logging.INFO, + msg=msg, + args=args, + exc_info=exc_info, + stack_info=stack_info, + extra=extra, + ) def warning( self, @@ -552,7 +637,16 @@ def warning( extra = extra or {} extra = {**extra, **kwargs} - # Buffer is not active and we need to flush + # Logging workflow for logging.warning: + # 1. Buffer is completely disabled - log right away + # 2. Log severity exceeds buffer's minimum threshold - bypass buffering + # 3. If neither condition met, store in buffer for potential later processing + + # MAINTAINABILITY_DECISION: + # Keeping this implementation to avoid complex code handling. + # Also for clarity over complexity + + # Buffer is not active and we need to log immediately if not self._logger_buffer: return self._logger.warning( msg, @@ -563,7 +657,7 @@ def warning( extra=extra, ) - # Buffer log level is higher than this log level and we need to flush + # Bypass buffer when log severity meets or exceeds configured minimum if _check_minimum_buffer_log_level(self._logger_buffer.minimum_log_level, "WARNING"): return self._logger.warning( msg, @@ -574,9 +668,15 @@ def warning( extra=extra, ) - log_record = _create_buffer_record(level="WARNING", msg=msg, args=args, **kwargs) - - self._add_log_line_to_buffer(log_record) + # Store record in the buffer + self._add_log_record_to_buffer( + level=logging.WARNING, + msg=msg, + args=args, + exc_info=exc_info, + stack_info=stack_info, + extra=extra, + ) def error( self, @@ -591,9 +691,11 @@ def error( extra = extra or {} extra = {**extra, **kwargs} - # Buffer is active and an error happened - # LoggerBufferConfig flush_on_error is True - # So, we need to flush the buffer + # Workflow: Error Logging with automatic buffer flushing + # 1. Buffer configuration checked for immediate flush + # 2. If auto-flush enabled, trigger complete buffer processing + # 3. Error log is not "bufferable", so ensure error log is immediately available + if self._logger_buffer and self._logger_buffer.flush_on_error: self.flush_buffer() @@ -619,9 +721,10 @@ def critical( extra = extra or {} extra = {**extra, **kwargs} - # Buffer is active and an error happened - # LoggerBufferConfig flush_on_error is True - # So, we need to flush the buffer + # Workflow: Error Logging with automatic buffer flushing + # 1. Buffer configuration checked for immediate flush + # 2. If auto-flush enabled, trigger complete buffer processing + # 3. Critical log is not "bufferable", so ensure error log is immediately available if self._logger_buffer and self._logger_buffer.flush_on_error: self.flush_buffer() @@ -647,9 +750,10 @@ def exception( extra = extra or {} extra = {**extra, **kwargs} - # Buffer is active and an error happened - # LoggerBufferConfig flush_on_error is True - # So, we need to flush the buffer + # Workflow: Error Logging with automatic buffer flushing + # 1. 
Buffer configuration checked for immediate flush + # 2. If auto-flush enabled, trigger complete buffer processing + # 3. Exception log is not "bufferable", so ensure error log is immediately available if self._logger_buffer and self._logger_buffer.flush_on_error: self.flush_buffer() diff --git a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py index 4386baf2af1..84418e3c9f8 100644 --- a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py +++ b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py @@ -82,7 +82,7 @@ def test_logger_buffer_is_never_buffered_with_exception(stdout, service_name): assert "Received an exception" == log["message"] -def test_logger_buffer_is_never_buffered_with_error_new(stdout, service_name): +def test_logger_buffer_is_never_buffered_with_error(stdout, service_name): # GIVEN: A logger configured with buffer logger_buffer_config = LoggerBufferConfig(max_size=10240) logger = Logger(service=service_name, stream=stdout, logger_buffer=logger_buffer_config) @@ -143,3 +143,18 @@ def test_logger_buffer_is_not_flushed_when_an_error_happens(stdout, service_name # THEN: We expect the log record is not buffered log = capture_logging_output(stdout) assert "Received an exception" == log["message"] + + +def test_create_and_flush_logs(stdout, service_name, monkeypatch): + # GIVEN: A logger configured with buffer + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=True) + logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + + logger.debug("this log line will be flushed") + + logger.flush_buffer() + + # THEN: We expect the log record is not buffered + log = capture_multiple_logging_statements_output(stdout) + assert "this log line will be flushed" == log[0]["message"] From e97d0652684d1860d71a0e42b9a06c9f918269e6 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 21 Feb 2025 23:30:44 +0000 Subject: [PATCH 14/40] Adding more logic --- aws_lambda_powertools/logging/buffer/cache.py | 2 + aws_lambda_powertools/logging/constants.py | 2 + aws_lambda_powertools/logging/exceptions.py | 8 ++ aws_lambda_powertools/logging/logger.py | 79 +++++++++++++++++-- aws_lambda_powertools/shared/functions.py | 5 ++ .../test_powertools_logger_buffer.py | 66 ++++++++++++++++ 6 files changed, 155 insertions(+), 7 deletions(-) diff --git a/aws_lambda_powertools/logging/buffer/cache.py b/aws_lambda_powertools/logging/buffer/cache.py index 88ab977354e..67911e8ee2d 100644 --- a/aws_lambda_powertools/logging/buffer/cache.py +++ b/aws_lambda_powertools/logging/buffer/cache.py @@ -20,6 +20,7 @@ def __init__(self, max_size_bytes: int): self.max_size_bytes: int = max_size_bytes self.cache: dict[str, deque] = {} self.current_size: dict[str, int] = {} + self.has_evicted: bool = False def add(self, key: str, item: Any) -> None: """ @@ -53,6 +54,7 @@ def add(self, key: str, item: Any) -> None: while self.current_size[key] + item_size > self.max_size_bytes and self.cache[key]: removed_item = self.cache[key].popleft() self.current_size[key] -= len(str(removed_item)) + self.has_evicted = True self.cache[key].append(item) self.current_size[key] += item_size diff --git a/aws_lambda_powertools/logging/constants.py b/aws_lambda_powertools/logging/constants.py index c98204f9bb1..01549a52d4b 100644 
--- a/aws_lambda_powertools/logging/constants.py +++ b/aws_lambda_powertools/logging/constants.py @@ -3,3 +3,5 @@ # logger.init attribute is set when Logger has been configured LOGGER_ATTRIBUTE_PRECONFIGURED = "init" LOGGER_ATTRIBUTE_HANDLER = "logger_handler" + +LOGGER_BUFFER_FIRST_INVOKE = "FIRST_INVOKE" diff --git a/aws_lambda_powertools/logging/exceptions.py b/aws_lambda_powertools/logging/exceptions.py index 17b1c837b71..4c489fd32c2 100644 --- a/aws_lambda_powertools/logging/exceptions.py +++ b/aws_lambda_powertools/logging/exceptions.py @@ -12,3 +12,11 @@ class OrphanedChildLoggerError(Exception): """ pass + + +class InvalidBufferItem(Exception): + """ + Raised when a buffer item exceeds the maximum allowed buffer size. + """ + + pass diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 605b4ec97d8..270ae4a024c 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -22,8 +22,13 @@ LOGGER_ATTRIBUTE_HANDLER, LOGGER_ATTRIBUTE_POWERTOOLS_HANDLER, LOGGER_ATTRIBUTE_PRECONFIGURED, + LOGGER_BUFFER_FIRST_INVOKE, +) +from aws_lambda_powertools.logging.exceptions import ( + InvalidBufferItem, + InvalidLoggerSamplingRateError, + OrphanedChildLoggerError, ) -from aws_lambda_powertools.logging.exceptions import InvalidLoggerSamplingRateError, OrphanedChildLoggerError from aws_lambda_powertools.logging.filters import SuppressFilter from aws_lambda_powertools.logging.formatter import ( RESERVED_FORMATTER_CUSTOM_KEYS, @@ -34,10 +39,12 @@ from aws_lambda_powertools.shared import constants from aws_lambda_powertools.shared.functions import ( extract_event_from_common_models, + get_tracer_id, resolve_env_var_choice, resolve_truthy_env_var_choice, ) from aws_lambda_powertools.utilities import jmespath_utils +from aws_lambda_powertools.warnings import PowertoolsUserWarning if TYPE_CHECKING: from aws_lambda_powertools.logging.buffer.config import LoggerBufferConfig @@ -518,14 +525,62 @@ def _add_log_record_to_buffer( level: int, msg: object, args: object, - exc_info: logging._ExcInfoType, - stack_info: bool, - extra: Mapping[str, object], + exc_info: logging._ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ): - tracer_id = os.getenv(constants.XRAY_TRACE_ID_ENV, None) - if tracer_id: + """ + Add log record to buffer with intelligent tracer ID handling. + + Parameters + ---------- + level : int + Logging level of the record. + msg : object + Log message to be recorded. + args : object + Additional arguments for the log message. + exc_info : logging._ExcInfoType, optional + Exception information for the log record. + stack_info : bool, optional + Whether to include stack information. + extra : Mapping[str, object], optional + Additional contextual information for the log record. + + Raises + ------ + InvalidBufferItem + If the log record cannot be added to the buffer. + + Notes + ----- + Handles special first invocation buffering and migration of log records + between different tracer contexts. 
+ """ + # Determine tracer ID, defaulting to first invoke marker + tracer_id = get_tracer_id() or LOGGER_BUFFER_FIRST_INVOKE + + try: + # Create log record for buffering log_record: dict[str, Any] = _create_buffer_record(level=level, msg=msg, args=args, extra=extra) + + # Migrate log records from first invoke to current tracer context + if tracer_id != LOGGER_BUFFER_FIRST_INVOKE and self._buffer_cache.get(LOGGER_BUFFER_FIRST_INVOKE): + # Retrieve first invoke log records + first_invoke_items = self._buffer_cache.get(LOGGER_BUFFER_FIRST_INVOKE) + + # Transfer log records to current tracer context + for item in first_invoke_items: + self._buffer_cache.add(tracer_id, item) + + # Clear first invoke buffer + self._buffer_cache.clear(LOGGER_BUFFER_FIRST_INVOKE) + + # Add current log record to buffer self._buffer_cache.add(tracer_id, log_record) + except InvalidBufferItem as exc: + # Wrap and re-raise buffer addition error + raise InvalidBufferItem("Cannot add item to the buffer") from exc def flush_buffer(self): """ @@ -535,6 +590,7 @@ def flush_buffer(self): ----- Retrieves log records for current trace from buffer Immediately processes and logs each record + Warning if some cache was evicted in that execution Clears buffer after complete processing Raises @@ -542,10 +598,19 @@ def flush_buffer(self): Any exceptions from underlying logging or buffer mechanisms will be propagated to caller """ - tracer_id = os.getenv(constants.XRAY_TRACE_ID_ENV, None) + tracer_id = get_tracer_id() for log_line in self._buffer_cache.get(tracer_id): self._create_and_flush_log_record(log_line) + if self._buffer_cache.has_evicted: + warnings.warn( + message="Some logs are not displayed because they were evicted from the buffer. " + "Increase buffer size to store more logs in the buffer", + category=PowertoolsUserWarning, + stacklevel=2, + ) + self._buffer_cache.has_evicted = False + self._buffer_cache.clear() def debug( diff --git a/aws_lambda_powertools/shared/functions.py b/aws_lambda_powertools/shared/functions.py index 18f3ec49351..4e961d4aee0 100644 --- a/aws_lambda_powertools/shared/functions.py +++ b/aws_lambda_powertools/shared/functions.py @@ -283,3 +283,8 @@ def abs_lambda_path(relative_path: str = "") -> str: def sanitize_xray_segment_name(name: str) -> str: return re.sub(constants.INVALID_XRAY_NAME_CHARACTERS, "", name) + + +def get_tracer_id() -> str | None: + xray_trace_id = os.getenv(constants.XRAY_TRACE_ID_ENV) + return xray_trace_id.split(";")[0].replace("Root=", "") if xray_trace_id else None diff --git a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py index 84418e3c9f8..f4c29e1bbb6 100644 --- a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py +++ b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py @@ -4,6 +4,7 @@ import json import random import string +import warnings from collections import namedtuple import pytest @@ -11,6 +12,7 @@ from aws_lambda_powertools import Logger from aws_lambda_powertools.logging.buffer import LoggerBufferConfig from aws_lambda_powertools.shared import constants +from aws_lambda_powertools.warnings import PowertoolsUserWarning @pytest.fixture @@ -158,3 +160,67 @@ def test_create_and_flush_logs(stdout, service_name, monkeypatch): # THEN: We expect the log record is not buffered log = capture_multiple_logging_statements_output(stdout) assert "this log line will be flushed" == log[0]["message"] + + +def 
test_create_buffer_with_item_overflow(stdout, service_name, monkeypatch): + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + + # GIVEN: A logger configured with 2 bytes + logger_buffer_config = LoggerBufferConfig(max_size=2, minimum_log_level="DEBUG") + + logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + + # WHEN logging a line with a size higher than buffer + # THEN must raise a warning + with pytest.warns(PowertoolsUserWarning, match="Item size*"): + logger.debug("this log line will be flushed") + + +def test_create_buffer_with_items_evicted(stdout, service_name, monkeypatch): + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + + # GIVEN: A logger configured with 1024 bytes + logger_buffer_config = LoggerBufferConfig(max_size=1024, minimum_log_level="DEBUG") + + logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + + # WHEN we add 3 lines that exceeds than 1024 bytes + logger.debug("this log line will be flushed") + logger.debug("this log line will be flushed") + logger.debug("this log line will be flushed") + logger.debug("this log line will be flushed") + logger.debug("this log line will be flushed") + + # THEN must raise a warning when trying to flush the lugs + with pytest.warns(PowertoolsUserWarning, match="Some logs are not displayed because*"): + logger.flush_buffer() + + +def test_create_buffer_with_items_evicted_next_invocation(stdout, service_name, monkeypatch): + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + + # GIVEN: A logger configured with 1024 bytes + logger_buffer_config = LoggerBufferConfig(max_size=1024, minimum_log_level="DEBUG") + + logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + + # WHEN Add multiple log entries that exceed buffer size + message = "this log line will be flushed" + logger.debug(message) + logger.debug(message) + logger.debug(message) + logger.debug(message) + logger.debug(message) + + # THEN First buffer flush triggers warning about log eviction + with pytest.warns(PowertoolsUserWarning, match="Some logs are not displayed because*"): + logger.flush_buffer() + + # WHEN Add another log entry + logger.debug("new log entry after buffer flush") + + # THEN Subsequent buffer flush should not trigger warning + with warnings.catch_warnings(record=True) as warning_list: + warnings.simplefilter("always") + logger.flush_buffer() + assert len(warning_list) == 0, "No warnings should be raised" From 8b30fe5a55723e8bf1b203b39005a36639fe73e4 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 21 Feb 2025 23:33:43 +0000 Subject: [PATCH 15/40] Adding more logic --- aws_lambda_powertools/logging/buffer/config.py | 6 ++---- aws_lambda_powertools/logging/types.py | 4 +--- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/aws_lambda_powertools/logging/buffer/config.py b/aws_lambda_powertools/logging/buffer/config.py index 9d14fe2375e..f7875c1838d 100644 --- a/aws_lambda_powertools/logging/buffer/config.py +++ b/aws_lambda_powertools/logging/buffer/config.py @@ -1,9 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from aws_lambda_powertools.logging.types import LOG_LEVEL_BUFFER_VALUES +from typing import Literal class LoggerBufferConfig: @@ -13,6 +10,7 @@ class LoggerBufferConfig: # Define class-level constant for valid log levels VALID_LOG_LEVELS: list[str] = ["DEBUG", "INFO", "WARNING"] + 
LOG_LEVEL_BUFFER_VALUES = Literal["DEBUG", "INFO", "WARNING"] def __init__( self, diff --git a/aws_lambda_powertools/logging/types.py b/aws_lambda_powertools/logging/types.py index 4d8ff853e0e..25f094bc755 100644 --- a/aws_lambda_powertools/logging/types.py +++ b/aws_lambda_powertools/logging/types.py @@ -1,12 +1,10 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Dict, Literal, TypedDict, Union +from typing import TYPE_CHECKING, Any, Dict, TypedDict, Union if TYPE_CHECKING: from typing_extensions import NotRequired, TypeAlias -LOG_LEVEL_BUFFER_VALUES = Literal["DEBUG", "INFO", "WARNING"] - class PowertoolsLogRecord(TypedDict): # Base fields (required) From 189321640e21f31ae491ed9459e187aab99c6d40 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Mon, 24 Feb 2025 17:24:10 +0000 Subject: [PATCH 16/40] Refactoring buffer class --- aws_lambda_powertools/logging/buffer/cache.py | 176 +++++++++++++++--- .../test_logger_buffer_cache.py | 18 +- 2 files changed, 162 insertions(+), 32 deletions(-) diff --git a/aws_lambda_powertools/logging/buffer/cache.py b/aws_lambda_powertools/logging/buffer/cache.py index 67911e8ee2d..a1489b52bc5 100644 --- a/aws_lambda_powertools/logging/buffer/cache.py +++ b/aws_lambda_powertools/logging/buffer/cache.py @@ -7,7 +7,104 @@ from aws_lambda_powertools.warnings import PowertoolsUserWarning +class KeyBufferCache: + """ + A cache implementation for a single key with size tracking and eviction support. + + This class manages a buffer for a specific key, keeping track of the current size + and providing methods to add, remove, and manage cached items. It supports automatic + eviction tracking and size management. + + Attributes + ---------- + cache : deque + A double-ended queue storing the cached items. + current_size : int + The total size of all items currently in the cache. + has_evicted : bool + A flag indicating whether any items have been evicted from the cache. + """ + + def __init__(self): + """ + Initialize a buffer cache for a specific key. + """ + self.cache: deque = deque() + self.current_size: int = 0 + self.has_evicted: bool = False + + def add(self, item: Any) -> None: + """ + Add an item to the cache. + + Parameters + ---------- + item : Any + The item to be stored in the cache. + """ + item_size = len(str(item)) + self.cache.append(item) + self.current_size += item_size + + def remove_oldest(self) -> Any: + """ + Remove and return the oldest item from the cache. + + Returns + ------- + Any + The removed item. + """ + removed_item = self.cache.popleft() + self.current_size -= len(str(removed_item)) + self.has_evicted = True + return removed_item + + def get(self) -> list: + """ + Retrieve items for this key. + + Returns + ------- + list + List of items in the cache. + """ + return list(self.cache) + + def clear(self) -> None: + """ + Clear the cache for this key. + """ + self.cache.clear() + self.current_size = 0 + self.has_evicted = False + + class LoggerBufferCache: + """ + A multi-key buffer cache with size-based eviction and management. + + This class provides a flexible caching mechanism that manages multiple keys, + with each key having its own buffer cache. The total size of each key's cache + is limited, and older items are automatically evicted when the size limit is reached. 
+
+    Key Features:
+    - Multiple key support
+    - Size-based eviction
+    - Tracking of evicted items
+    - Configurable maximum buffer size
+
+    Example
+    --------
+    >>> buffer_cache = LoggerBufferCache(max_size_bytes=1000)
+    >>> buffer_cache.add("logs", "First log message")
+    >>> buffer_cache.add("debug", "Debug information")
+    >>> buffer_cache.get("logs")
+    ['First log message']
+    >>> buffer_cache.get_current_size("logs")
+    17
+    """
+
     def __init__(self, max_size_bytes: int):
         """
         Initialize the LoggerBufferCache.
@@ -15,12 +112,10 @@ def __init__(self, max_size_bytes: int):
         Parameters
         ----------
         max_size_bytes : int
-            Maximum size of the cache in bytes.
+            Maximum size of the cache in bytes for each key.
         """
         self.max_size_bytes: int = max_size_bytes
-        self.cache: dict[str, deque] = {}
-        self.current_size: dict[str, int] = {}
-        self.has_evicted: bool = False
+        self.cache: dict[str, KeyBufferCache] = {}

     def add(self, key: str, item: Any) -> None:
         """
@@ -33,31 +128,34 @@ def add(self, key: str, item: Any) -> None:
         item : Any
             The item to be stored in the cache.

-        Notes
-        -----
-        If the item size exceeds the maximum cache size, it will not be added.
+        Notes
+        -----
+        If the item is larger than the entire buffer, a warning is emitted and
+        the item is not added.
         """
+        # Check if item is larger than entire buffer
         item_size = len(str(item))
-
         if item_size > self.max_size_bytes:
             warnings.warn(
-                message=f"Item size {item_size} bytes exceeds total cache size {self.max_size_bytes} bytes",
-                category=PowertoolsUserWarning,
+                f"Item size {item_size} bytes exceeds total cache size {self.max_size_bytes} bytes",
+                PowertoolsUserWarning,
                 stacklevel=2,
             )
-            return
+            return False

+        # Create the key's cache if it doesn't exist
         if key not in self.cache:
-            self.cache[key] = deque()
-            self.current_size[key] = 0
+            self.cache[key] = KeyBufferCache()

-        while self.current_size[key] + item_size > self.max_size_bytes and self.cache[key]:
-            removed_item = self.cache[key].popleft()
-            self.current_size[key] -= len(str(removed_item))
-            self.has_evicted = True
+        # Calculate the size after adding the new item
+        new_total_size = self.cache[key].current_size + item_size

-        self.cache[key].append(item)
-        self.current_size[key] += item_size
+        # If adding the item would exceed max size, remove oldest items
+        while new_total_size > self.max_size_bytes and self.cache[key].cache:
+            self.cache[key].remove_oldest()
+            new_total_size = self.cache[key].current_size + item_size
+
+        self.cache[key].add(item)

     def get(self, key: str) -> list:
         """
@@ -73,7 +171,7 @@ def get(self, key: str) -> list:
         list
             List of items for the given key, or an empty list if the key doesn't exist.
         """
-        return list(self.cache.get(key, deque()))
+        return [] if key not in self.cache else self.cache[key].get()

     def clear(self, key: str | None = None) -> None:
         """
@@ -81,13 +179,45 @@ def clear(self, key: str | None = None) -> None:

         Parameters
         ----------
-        key : str, optional
+        key : Optional[str], optional
             The key to clear. If None, clears the entire cache.
         """
         if key:
             if key in self.cache:
+                self.cache[key].clear()
                 del self.cache[key]
-                del self.current_size[key]
         else:
             self.cache.clear()
-            self.current_size.clear()
+
+    def has_evicted(self, key: str) -> bool:
+        """
+        Check if a specific key's cache has evicted items.
+
+        Parameters
+        ----------
+        key : str
+            The key to check for evicted items.
+
+        Returns
+        -------
+        bool
+            True if items have been evicted, False otherwise.
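+
+        Example
+        -------
+        A minimal, illustrative sketch of eviction tracking (the byte sizes are
+        chosen for the example only):
+
+        >>> cache = LoggerBufferCache(max_size_bytes=10)
+        >>> cache.add("trace-1", "12345678")  # 8 bytes, fits
+        >>> cache.add("trace-1", "abcdef")  # 8 + 6 > 10, so the oldest item is evicted
+        >>> cache.has_evicted("trace-1")
+        True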
+        """
+        return False if key not in self.cache else self.cache[key].has_evicted
+
+    def get_current_size(self, key: str) -> int | None:
+        """
+        Get the current size of the buffer for a specific key.
+
+        Parameters
+        ----------
+        key : str
+            The key to get the current size for.
+
+        Returns
+        -------
+        int or None
+            The current size of the buffer for the key.
+            Returns None if the key does not exist.
+        """
+        return None if key not in self.cache else self.cache[key].current_size
diff --git a/tests/unit/logger/required_dependencies/test_logger_buffer_cache.py b/tests/unit/logger/required_dependencies/test_logger_buffer_cache.py
index 8bf357d2bb1..5202fa6f5f0 100644
--- a/tests/unit/logger/required_dependencies/test_logger_buffer_cache.py
+++ b/tests/unit/logger/required_dependencies/test_logger_buffer_cache.py
@@ -12,7 +12,6 @@ def test_initialization():
     # THEN cache should have correct initial state
     assert logger_cache.max_size_bytes == 1000
     assert logger_cache.cache == {}
-    assert logger_cache.current_size == {}


 def test_add_single_item():
@@ -25,7 +24,7 @@ def test_add_single_item():
     # THEN item is stored correctly with proper size tracking
     assert len(logger_cache.get("key1")) == 1
     assert logger_cache.get("key1")[0] == "test_item"
-    assert logger_cache.current_size["key1"] == len("test_item")
+    assert logger_cache.get_current_size("key1") == len("test_item")


 def test_add_multiple_items_same_key():
@@ -39,6 +38,7 @@ def test_add_multiple_items_same_key():
     # THEN items are stored sequentially
     assert len(logger_cache.get("key1")) == 2
     assert logger_cache.get("key1") == ["item1", "item2"]
+    assert logger_cache.has_evicted("key1") is False


 def test_cache_size_limit_single_key():
@@ -52,7 +52,8 @@ def test_cache_size_limit_single_key():

     # THEN cache maintains size limit for a single key
     assert len(logger_cache.get("key1")) > 0
-    assert logger_cache.current_size["key1"] <= 10
+    assert logger_cache.get_current_size("key1") <= 10
+    assert logger_cache.has_evicted("key1") is True


 def test_item_larger_than_cache():
@@ -104,7 +105,6 @@ def test_clear_all():

     # THEN cache becomes empty
     assert logger_cache.cache == {}
-    assert logger_cache.current_size == {}


 def test_clear_specific_key():
@@ -134,9 +134,9 @@ def test_multiple_keys_with_size_limits():
     logger_cache.add("key2", "long_item")

     # THEN total size remains within limit
-    assert len(logger_cache.cache["key1"]) > 0
-    assert len(logger_cache.cache["key2"]) > 0
-    assert logger_cache.current_size["key1"] + logger_cache.current_size["key2"] <= 20
+    assert len(logger_cache.get("key1")) > 0
+    assert len(logger_cache.get("key2")) > 0
+    assert logger_cache.get_current_size("key1") + logger_cache.get_current_size("key2") <= 20


 def test_add_different_types():
@@ -162,5 +162,5 @@ def test_cache_size_tracking():
     logger_cache.add("key1", "another_item")

     # THEN current size is tracked correctly
-    assert logger_cache.current_size["key1"] == len("small") + len("another_item")
-    assert logger_cache.current_size["key1"] <= 30
+    assert logger_cache.get_current_size("key1") == len("small") + len("another_item")
+    assert logger_cache.get_current_size("key1") <= 30

From ce3804e63c09c3b4cb4414682be45b135980f919 Mon Sep 17 00:00:00 2001
From: Leandro Damascena
Date: Mon, 24 Feb 2025 17:33:05 +0000
Subject: [PATCH 17/40] Refactoring buffer class

---
 aws_lambda_powertools/logging/buffer/cache.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/aws_lambda_powertools/logging/buffer/cache.py b/aws_lambda_powertools/logging/buffer/cache.py
index a1489b52bc5..e3cfb648d9d
100644 --- a/aws_lambda_powertools/logging/buffer/cache.py +++ b/aws_lambda_powertools/logging/buffer/cache.py @@ -141,7 +141,7 @@ def add(self, key: str, item: Any) -> None: PowertoolsUserWarning, stacklevel=2, ) - return False + return # Create the key's cache if it doesn't exist if key not in self.cache: From 7f49bb408e0392762277628b42b1dd56e05c429e Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Tue, 25 Feb 2025 08:57:50 +0000 Subject: [PATCH 18/40] Refactoring buffer class --- aws_lambda_powertools/logging/logger.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 270ae4a024c..105d0c0112b 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -602,15 +602,15 @@ def flush_buffer(self): for log_line in self._buffer_cache.get(tracer_id): self._create_and_flush_log_record(log_line) - if self._buffer_cache.has_evicted: + if self._buffer_cache.has_evicted(tracer_id): warnings.warn( message="Some logs are not displayed because they were evicted from the buffer. " "Increase buffer size to store more logs in the buffer", category=PowertoolsUserWarning, stacklevel=2, ) - self._buffer_cache.has_evicted = False + # Clear the entire cache self._buffer_cache.clear() def debug( From 351d8569c81d785cb9fd96065279de95af0cfe72 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 28 Feb 2025 16:46:22 +0000 Subject: [PATCH 19/40] More refactor --- aws_lambda_powertools/logging/buffer/cache.py | 2 +- aws_lambda_powertools/logging/constants.py | 2 - aws_lambda_powertools/logging/logger.py | 53 ++++++++++--------- .../test_logger_buffer_cache.py | 4 +- 4 files changed, 30 insertions(+), 31 deletions(-) diff --git a/aws_lambda_powertools/logging/buffer/cache.py b/aws_lambda_powertools/logging/buffer/cache.py index e3cfb648d9d..6fb2800e106 100644 --- a/aws_lambda_powertools/logging/buffer/cache.py +++ b/aws_lambda_powertools/logging/buffer/cache.py @@ -189,7 +189,7 @@ def clear(self, key: str | None = None) -> None: else: self.cache.clear() - def has_evicted(self, key: str) -> bool: + def has_items_evicted(self, key: str) -> bool: """ Check if a specific key's cache has evicted items. diff --git a/aws_lambda_powertools/logging/constants.py b/aws_lambda_powertools/logging/constants.py index 01549a52d4b..c98204f9bb1 100644 --- a/aws_lambda_powertools/logging/constants.py +++ b/aws_lambda_powertools/logging/constants.py @@ -3,5 +3,3 @@ # logger.init attribute is set when Logger has been configured LOGGER_ATTRIBUTE_PRECONFIGURED = "init" LOGGER_ATTRIBUTE_HANDLER = "logger_handler" - -LOGGER_BUFFER_FIRST_INVOKE = "FIRST_INVOKE" diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 105d0c0112b..a2b96f3ee1f 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -22,7 +22,6 @@ LOGGER_ATTRIBUTE_HANDLER, LOGGER_ATTRIBUTE_POWERTOOLS_HANDLER, LOGGER_ATTRIBUTE_PRECONFIGURED, - LOGGER_BUFFER_FIRST_INVOKE, ) from aws_lambda_powertools.logging.exceptions import ( InvalidBufferItem, @@ -528,7 +527,7 @@ def _add_log_record_to_buffer( exc_info: logging._ExcInfoType = None, stack_info: bool = False, extra: Mapping[str, object] | None = None, - ): + ) -> None: """ Add log record to buffer with intelligent tracer ID handling. @@ -558,33 +557,23 @@ def _add_log_record_to_buffer( between different tracer contexts. 
""" # Determine tracer ID, defaulting to first invoke marker - tracer_id = get_tracer_id() or LOGGER_BUFFER_FIRST_INVOKE + tracer_id = get_tracer_id() try: - # Create log record for buffering - log_record: dict[str, Any] = _create_buffer_record(level=level, msg=msg, args=args, extra=extra) - - # Migrate log records from first invoke to current tracer context - if tracer_id != LOGGER_BUFFER_FIRST_INVOKE and self._buffer_cache.get(LOGGER_BUFFER_FIRST_INVOKE): - # Retrieve first invoke log records - first_invoke_items = self._buffer_cache.get(LOGGER_BUFFER_FIRST_INVOKE) - - # Transfer log records to current tracer context - for item in first_invoke_items: - self._buffer_cache.add(tracer_id, item) - - # Clear first invoke buffer - self._buffer_cache.clear(LOGGER_BUFFER_FIRST_INVOKE) - - # Add current log record to buffer - self._buffer_cache.add(tracer_id, log_record) + if tracer_id: + log_record: dict[str, Any] = _create_buffer_record(level=level, msg=msg, args=args, extra=extra) + self._buffer_cache.add(tracer_id, log_record) except InvalidBufferItem as exc: - # Wrap and re-raise buffer addition error - raise InvalidBufferItem("Cannot add item to the buffer") from exc + # Wrap and re-raise buffer addition error as warning + warnings.warn( + message=f"Cannot add item to the buffer: {str(exc)}", + category=PowertoolsUserWarning, + stacklevel=3, + ) - def flush_buffer(self): + def flush_buffer(self) -> None: """ - Flush all buffered log records associated with current trace ID. + Flush all buffered log records associated with current execution. Notes ----- @@ -599,10 +588,22 @@ def flush_buffer(self): will be propagated to caller """ tracer_id = get_tracer_id() - for log_line in self._buffer_cache.get(tracer_id): + + # Flushing log without a tracer id? Return + if not tracer_id: + return + + # is buffer empty? return + buffer = self._buffer_cache.get(tracer_id) + if not buffer: + return + + # Process log records + for log_line in buffer: self._create_and_flush_log_record(log_line) - if self._buffer_cache.has_evicted(tracer_id): + # Has items evicted? + if self._buffer_cache.has_items_evicted(tracer_id): warnings.warn( message="Some logs are not displayed because they were evicted from the buffer. 
" "Increase buffer size to store more logs in the buffer", diff --git a/tests/unit/logger/required_dependencies/test_logger_buffer_cache.py b/tests/unit/logger/required_dependencies/test_logger_buffer_cache.py index 5202fa6f5f0..d1385dfe5c9 100644 --- a/tests/unit/logger/required_dependencies/test_logger_buffer_cache.py +++ b/tests/unit/logger/required_dependencies/test_logger_buffer_cache.py @@ -38,7 +38,7 @@ def test_add_multiple_items_same_key(): # THEN items are stored sequentially assert len(logger_cache.get("key1")) == 2 assert logger_cache.get("key1") == ["item1", "item2"] - assert logger_cache.has_evicted("key1") is False + assert logger_cache.has_items_evicted("key1") is False def test_cache_size_limit_single_key(): @@ -53,7 +53,7 @@ def test_cache_size_limit_single_key(): # THEN cache maintains size limit for a single key assert len(logger_cache.get("key1")) > 0 assert logger_cache.get_current_size("key1") <= 10 - assert logger_cache.has_evicted("key1") is True + assert logger_cache.has_items_evicted("key1") is True def test_item_larger_than_cache(): From 7d9813844e084546a7220c1a367d29afd0af18a7 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 28 Feb 2025 17:31:02 +0000 Subject: [PATCH 20/40] Adding more coverage + documentation --- aws_lambda_powertools/logging/buffer/cache.py | 3 +- aws_lambda_powertools/logging/exceptions.py | 8 - aws_lambda_powertools/logging/logger.py | 255 +++++++++--------- .../test_powertools_logger_buffer.py | 127 ++++++--- 4 files changed, 214 insertions(+), 179 deletions(-) diff --git a/aws_lambda_powertools/logging/buffer/cache.py b/aws_lambda_powertools/logging/buffer/cache.py index 6fb2800e106..3bfbb573fe6 100644 --- a/aws_lambda_powertools/logging/buffer/cache.py +++ b/aws_lambda_powertools/logging/buffer/cache.py @@ -137,8 +137,9 @@ def add(self, key: str, item: Any) -> None: item_size = len(str(item)) if item_size > self.max_size_bytes: warnings.warn( + message=f"Cannot add item to the buffer " f"Item size {item_size} bytes exceeds total cache size {self.max_size_bytes} bytes", - PowertoolsUserWarning, + category=PowertoolsUserWarning, stacklevel=2, ) return diff --git a/aws_lambda_powertools/logging/exceptions.py b/aws_lambda_powertools/logging/exceptions.py index 4c489fd32c2..17b1c837b71 100644 --- a/aws_lambda_powertools/logging/exceptions.py +++ b/aws_lambda_powertools/logging/exceptions.py @@ -12,11 +12,3 @@ class OrphanedChildLoggerError(Exception): """ pass - - -class InvalidBufferItem(Exception): - """ - Raised when a buffer item exceeds the maximum allowed buffer size. - """ - - pass diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 39b0dc694fa..4c67216fcad 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -24,7 +24,6 @@ LOGGER_ATTRIBUTE_PRECONFIGURED, ) from aws_lambda_powertools.logging.exceptions import ( - InvalidBufferItem, InvalidLoggerSamplingRateError, OrphanedChildLoggerError, ) @@ -510,136 +509,6 @@ def decorate(event, context, *args, **kwargs): return decorate - def _create_and_flush_log_record(self, log_line: dict) -> None: - """ - Create and immediately flush a log record to the configured logger. 
- - Parameters - ---------- - log_line : dict[str, Any] - Dictionary containing log record details with keys: - - 'level': Logging level - - 'filename': Source filename - - 'line': Line number - - 'msg': Log message - - 'function': Source function name - - 'extra': Additional context - - 'timestamp': Original log creation time - - Notes - ----- - Bypasses standard logging flow by directly creating and handling a log record. - Preserves original timestamp and source information. - """ - record = self._logger.makeRecord( - name=self.name, - level=log_line["level"], - fn=log_line["filename"], - lno=log_line["line"], - msg=log_line["msg"], - args=(), - exc_info=None, - func=log_line["function"], - extra=log_line["extra"], - ) - record.created = log_line["timestamp"] - self._logger.handle(record) - - def _add_log_record_to_buffer( - self, - level: int, - msg: object, - args: object, - exc_info: logging._ExcInfoType = None, - stack_info: bool = False, - extra: Mapping[str, object] | None = None, - ) -> None: - """ - Add log record to buffer with intelligent tracer ID handling. - - Parameters - ---------- - level : int - Logging level of the record. - msg : object - Log message to be recorded. - args : object - Additional arguments for the log message. - exc_info : logging._ExcInfoType, optional - Exception information for the log record. - stack_info : bool, optional - Whether to include stack information. - extra : Mapping[str, object], optional - Additional contextual information for the log record. - - Raises - ------ - InvalidBufferItem - If the log record cannot be added to the buffer. - - Notes - ----- - Handles special first invocation buffering and migration of log records - between different tracer contexts. - """ - # Determine tracer ID, defaulting to first invoke marker - tracer_id = get_tracer_id() - - try: - if tracer_id: - log_record: dict[str, Any] = _create_buffer_record(level=level, msg=msg, args=args, extra=extra) - self._buffer_cache.add(tracer_id, log_record) - except InvalidBufferItem as exc: - # Wrap and re-raise buffer addition error as warning - warnings.warn( - message=f"Cannot add item to the buffer: {str(exc)}", - category=PowertoolsUserWarning, - stacklevel=3, - ) - - def flush_buffer(self) -> None: - """ - Flush all buffered log records associated with current execution. - - Notes - ----- - Retrieves log records for current trace from buffer - Immediately processes and logs each record - Warning if some cache was evicted in that execution - Clears buffer after complete processing - - Raises - ------ - Any exceptions from underlying logging or buffer mechanisms - will be propagated to caller - """ - tracer_id = get_tracer_id() - - # Flushing log without a tracer id? Return - if not tracer_id: - return - - # is buffer empty? return - buffer = self._buffer_cache.get(tracer_id) - if not buffer: - return - - # Process log records - for log_line in buffer: - self._create_and_flush_log_record(log_line) - - # Has items evicted? - if self._buffer_cache.has_items_evicted(tracer_id): - warnings.warn( - message="Some logs are not displayed because they were evicted from the buffer. 
" - "Increase buffer size to store more logs in the buffer", - category=PowertoolsUserWarning, - stacklevel=2, - ) - - # Clear the entire cache - self._buffer_cache.clear() - def debug( self, msg: object, @@ -1139,6 +1008,130 @@ def _determine_log_level(self, level: str | int | None) -> str | int: # Powertools log level is set, we use this return powertools_log_level.upper() + # FUNCTIONS for Buffering log + + def _create_and_flush_log_record(self, log_line: dict) -> None: + """ + Create and immediately flush a log record to the configured logger. + + Parameters + ---------- + log_line : dict[str, Any] + Dictionary containing log record details with keys: + - 'level': Logging level + - 'filename': Source filename + - 'line': Line number + - 'msg': Log message + - 'function': Source function name + - 'extra': Additional context + - 'timestamp': Original log creation time + + Notes + ----- + Bypasses standard logging flow by directly creating and handling a log record. + Preserves original timestamp and source information. + """ + record = self._logger.makeRecord( + name=self.name, + level=log_line["level"], + fn=log_line["filename"], + lno=log_line["line"], + msg=log_line["msg"], + args=(), + exc_info=None, + func=log_line["function"], + extra=log_line["extra"], + ) + record.created = log_line["timestamp"] + self._logger.handle(record) + + def _add_log_record_to_buffer( + self, + level: int, + msg: object, + args: object, + exc_info: logging._ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, + ) -> None: + """ + Add log record to buffer with intelligent tracer ID handling. + + Parameters + ---------- + level : int + Logging level of the record. + msg : object + Log message to be recorded. + args : object + Additional arguments for the log message. + exc_info : logging._ExcInfoType, optional + Exception information for the log record. + stack_info : bool, optional + Whether to include stack information. + extra : Mapping[str, object], optional + Additional contextual information for the log record. + + Raises + ------ + InvalidBufferItem + If the log record cannot be added to the buffer. + + Notes + ----- + Handles special first invocation buffering and migration of log records + between different tracer contexts. + """ + # Determine tracer ID, defaulting to first invoke marker + tracer_id = get_tracer_id() + + if tracer_id: + log_record: dict[str, Any] = _create_buffer_record(level=level, msg=msg, args=args, extra=extra) + self._buffer_cache.add(tracer_id, log_record) + + def flush_buffer(self) -> None: + """ + Flush all buffered log records associated with current execution. + + Notes + ----- + Retrieves log records for current trace from buffer + Immediately processes and logs each record + Warning if some cache was evicted in that execution + Clears buffer after complete processing + + Raises + ------ + Any exceptions from underlying logging or buffer mechanisms + will be propagated to caller + """ + tracer_id = get_tracer_id() + + # Flushing log without a tracer id? Return + if not tracer_id: + return + + # is buffer empty? return + buffer = self._buffer_cache.get(tracer_id) + if not buffer: + return + + # Process log records + for log_line in buffer: + self._create_and_flush_log_record(log_line) + + # Has items evicted? + if self._buffer_cache.has_items_evicted(tracer_id): + warnings.warn( + message="Some logs are not displayed because they were evicted from the buffer. 
" + "Increase buffer size to store more logs in the buffer", + category=PowertoolsUserWarning, + stacklevel=2, + ) + + # Clear the entire cache + self._buffer_cache.clear() + def set_package_logger( level: str | int = logging.DEBUG, diff --git a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py index f4c29e1bbb6..bb180c13844 100644 --- a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py +++ b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py @@ -48,7 +48,8 @@ def capture_multiple_logging_statements_output(stdout): @pytest.mark.parametrize("log_level", ["DEBUG", "WARNING", "INFO"]) def test_logger_buffer_with_minimum_level_warning(log_level, stdout, service_name): - # GIVEN a configured logger with buffer enabled and specific minimum log level + + # GIVEN A logger configured with a buffer and minimum log level set to WARNING logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="WARNING") logger = Logger(level=log_level, service=service_name, stream=stdout, logger_buffer=logger_buffer_config) @@ -59,7 +60,7 @@ def test_logger_buffer_with_minimum_level_warning(log_level, stdout, service_nam "DEBUG": logger.debug, } - # WHEN a log message is sent using the corresponding log method + # WHEN Logging a message using the specified log level log_message = log_command[log_level] log_message(msg) log_dict = stdout.getvalue() @@ -69,41 +70,43 @@ def test_logger_buffer_with_minimum_level_warning(log_level, stdout, service_nam def test_logger_buffer_is_never_buffered_with_exception(stdout, service_name): - # GIVEN: A logger configured with buffer + # GIVEN A logger configured with a buffer and default logging behavior logger_buffer_config = LoggerBufferConfig(max_size=10240) logger = Logger(service=service_name, stream=stdout, logger_buffer=logger_buffer_config) - # WHEN: An exception is raised and logged + # WHEN An exception is raised and logged try: raise ValueError("something went wrong") except Exception: logger.exception("Received an exception") - # THEN: We expect the log record is not buffered + # THEN We expect the log record is not buffered log = capture_logging_output(stdout) assert "Received an exception" == log["message"] def test_logger_buffer_is_never_buffered_with_error(stdout, service_name): - # GIVEN: A logger configured with buffer + # GIVEN A logger configured with a buffer and default logging behavior logger_buffer_config = LoggerBufferConfig(max_size=10240) logger = Logger(service=service_name, stream=stdout, logger_buffer=logger_buffer_config) - # WHEN: An exception is raised and logged + # WHEN Logging an error message logger.error("Received an exception") - # THEN: We expect the log record is not buffered + # THEN The error log should be immediately output without buffering log = capture_logging_output(stdout) assert "Received an exception" == log["message"] @pytest.mark.parametrize("log_level", ["CRITICAL", "ERROR"]) def test_logger_buffer_is_flushed_when_an_error_happens(stdout, service_name, log_level, monkeypatch): - # GIVEN: A logger configured with buffer monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + + # GIVEN A logger configured with buffer and automatic error-based flushing logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=True) logger = Logger(level="DEBUG", service=service_name, stream=stdout, 
logger_buffer=logger_buffer_config) + # WHEN Adding debug log messages before triggering an error logger.debug("this log line will be flushed") logger.debug("this log line will be flushed too") @@ -113,22 +116,26 @@ def test_logger_buffer_is_flushed_when_an_error_happens(stdout, service_name, lo "EXCEPTION": logger.exception, } - # WHEN a log message is sent using the corresponding log method + # WHEN Logging an error message using the specified log level log_message = log_command[log_level] log_message("Received an exception") - # THEN: We expect the log record is not buffered + # THEN: All buffered log messages should be flushed and output log = capture_multiple_logging_statements_output(stdout) + assert isinstance(log, list) assert "this log line will be flushed" == log[0]["message"] assert "this log line will be flushed too" == log[1]["message"] @pytest.mark.parametrize("log_level", ["CRITICAL", "ERROR"]) -def test_logger_buffer_is_not_flushed_when_an_error_happens(stdout, service_name, log_level): - # GIVEN: A logger configured with buffer +def test_logger_buffer_is_not_flushed_when_an_error_happens(stdout, service_name, log_level, monkeypatch): + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + + # GIVEN A logger configured with a buffer and error flushing disabled logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=False) logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + # WHEN Adding debug log messages before an error logger.debug("this log line will be flushed") logger.debug("this log line will be flushed too") @@ -138,73 +145,62 @@ def test_logger_buffer_is_not_flushed_when_an_error_happens(stdout, service_name "EXCEPTION": logger.exception, } - # WHEN a log message is sent using the corresponding log method + # WHEN Logging an error message using the specified log level log_message = log_command[log_level] log_message("Received an exception") - # THEN: We expect the log record is not buffered + # THEN The error log message should be output, but previous debug logs should remain buffered log = capture_logging_output(stdout) + assert not isinstance(log, list) assert "Received an exception" == log["message"] + assert log_level == log["level"] def test_create_and_flush_logs(stdout, service_name, monkeypatch): - # GIVEN: A logger configured with buffer monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + + # GIVEN A logger configured with a large buffer logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=True) logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + # WHEN Logging a message and then flushing the buffer logger.debug("this log line will be flushed") - logger.flush_buffer() - # THEN: We expect the log record is not buffered + # THEN The log record should be immediately output and not remain buffered log = capture_multiple_logging_statements_output(stdout) assert "this log line will be flushed" == log[0]["message"] -def test_create_buffer_with_item_overflow(stdout, service_name, monkeypatch): - monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") - - # GIVEN: A logger configured with 2 bytes - logger_buffer_config = LoggerBufferConfig(max_size=2, minimum_log_level="DEBUG") - - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) - - # WHEN logging a line with a size higher than buffer - # THEN must raise a 
warning - with pytest.warns(PowertoolsUserWarning, match="Item size*"): - logger.debug("this log line will be flushed") - - def test_create_buffer_with_items_evicted(stdout, service_name, monkeypatch): monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") - # GIVEN: A logger configured with 1024 bytes + # GIVEN A logger configured with a 1024-byte buffer logger_buffer_config = LoggerBufferConfig(max_size=1024, minimum_log_level="DEBUG") logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) - # WHEN we add 3 lines that exceeds than 1024 bytes + # WHEN Adding multiple log entries that exceed buffer size logger.debug("this log line will be flushed") logger.debug("this log line will be flushed") logger.debug("this log line will be flushed") logger.debug("this log line will be flushed") logger.debug("this log line will be flushed") - # THEN must raise a warning when trying to flush the lugs + # THEN A warning should be raised when flushing logs that exceed buffer capacity with pytest.warns(PowertoolsUserWarning, match="Some logs are not displayed because*"): logger.flush_buffer() -def test_create_buffer_with_items_evicted_next_invocation(stdout, service_name, monkeypatch): +def test_create_buffer_with_items_evicted_with_next_invocation(stdout, service_name, monkeypatch): monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") - # GIVEN: A logger configured with 1024 bytes + # GIVEN A logger configured with a 1024-byte buffer logger_buffer_config = LoggerBufferConfig(max_size=1024, minimum_log_level="DEBUG") logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) - # WHEN Add multiple log entries that exceed buffer size + # WHEN Adding multiple log entries that exceed buffer size message = "this log line will be flushed" logger.debug(message) logger.debug(message) @@ -216,7 +212,8 @@ def test_create_buffer_with_items_evicted_next_invocation(stdout, service_name, with pytest.warns(PowertoolsUserWarning, match="Some logs are not displayed because*"): logger.flush_buffer() - # WHEN Add another log entry + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "12345") + # WHEN Adding another log entry after initial flush logger.debug("new log entry after buffer flush") # THEN Subsequent buffer flush should not trigger warning @@ -224,3 +221,55 @@ def test_create_buffer_with_items_evicted_next_invocation(stdout, service_name, warnings.simplefilter("always") logger.flush_buffer() assert len(warning_list) == 0, "No warnings should be raised" + + +def test_flush_buffer_when_empty(stdout, service_name, monkeypatch): + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + + # GIVEN: A logger configured with a 1024-byte buffer + logger_buffer_config = LoggerBufferConfig(max_size=1024, minimum_log_level="DEBUG") + + logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + + # WHEN: Flushing the buffer without adding any log entries + logger.flush_buffer() + + # THEN: No output should be generated + log = capture_multiple_logging_statements_output(stdout) + assert not log + + +def test_log_record_exceeding_buffer_size(stdout, service_name, monkeypatch): + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + + # GIVEN A logger configured with a small 10-byte buffer + logger_buffer_config = LoggerBufferConfig(max_size=10, minimum_log_level="DEBUG") + + logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + + # 
WHEN Attempting to log a message larger than the entire buffer + # THE: A warning should be raised indicating buffer size limitation + with pytest.warns(PowertoolsUserWarning, match="Cannot add item to the buffer*"): + logger.debug("this log is bigger than entire buffer size") + + +@pytest.mark.parametrize("log_level", ["WARNING", "INFO"]) +def test_logger_buffer_log_output_for_levels_above_minimum(log_level, stdout, service_name): + # GIVEN A logger configured with a buffer and minimum log level set to DEBUG + logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG") + logger = Logger(level=log_level, service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + + msg = f"This is a test with level {log_level}" + log_command = { + "INFO": logger.info, + "WARNING": logger.warning, + } + + # WHEN Logging a message using the specified log level higher than debug + log_message = log_command[log_level] + log_message(msg) + + # THEN: The logged message should be immediately output and not buffered + log = capture_multiple_logging_statements_output(stdout) + assert len(log) == 1 + assert log[0]["message"] == msg From 0c8cb3b72105857d48fcb9830eabc98d93f88353 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 28 Feb 2025 18:20:17 +0000 Subject: [PATCH 21/40] Adding decorator --- aws_lambda_powertools/logging/logger.py | 17 ++++++- .../test_powertools_logger_buffer.py | 48 +++++++++++++++++++ 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 4c67216fcad..a64837d9d03 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -400,6 +400,7 @@ def inject_lambda_context( log_event: bool | None = None, correlation_id_path: str | None = None, clear_state: bool | None = False, + flush_buffer_on_uncaught_error: bool = False, ) -> AnyCallableT: ... @overload @@ -409,6 +410,7 @@ def inject_lambda_context( log_event: bool | None = None, correlation_id_path: str | None = None, clear_state: bool | None = False, + flush_buffer_on_uncaught_error: bool = False, ) -> Callable[[AnyCallableT], AnyCallableT]: ... 
def inject_lambda_context( @@ -417,6 +419,7 @@ def inject_lambda_context( log_event: bool | None = None, correlation_id_path: str | None = None, clear_state: bool | None = False, + flush_buffer_on_uncaught_error: bool = False, ) -> Any: """Decorator to capture Lambda contextual info and inject into logger @@ -473,6 +476,7 @@ def handler(event, context): log_event=log_event, correlation_id_path=correlation_id_path, clear_state=clear_state, + flush_buffer_on_uncaught_error=flush_buffer_on_uncaught_error, ) log_event = resolve_truthy_env_var_choice( @@ -505,7 +509,18 @@ def decorate(event, context, *args, **kwargs): if self.sampling_rate and not cold_start: self.refresh_sample_rate_calculation() - return lambda_handler(event, context, *args, **kwargs) + try: + # Execute the Lambda handler with provided event and context + return lambda_handler(event, context, *args, **kwargs) + except: + # Re-raise any exceptions that occur during handler execution + raise + finally: + # Flush the log buffer if configured to do so on uncaught errors + # Ensures logging state is cleaned up even if an exception is raised + if flush_buffer_on_uncaught_error: + logger.debug("An error happened, flushing the buffer") + self.flush_buffer() return decorate diff --git a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py index bb180c13844..69083aec9e4 100644 --- a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py +++ b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py @@ -273,3 +273,51 @@ def test_logger_buffer_log_output_for_levels_above_minimum(log_level, stdout, se log = capture_multiple_logging_statements_output(stdout) assert len(log) == 1 assert log[0]["message"] == msg + + +def test_logger_buffer_flush_on_uncaught_exception(stdout, service_name, monkeypatch, lambda_context): + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + + # GIVEN: A logger configured with a large buffer and error-based flushing + logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=True) + logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + + @logger.inject_lambda_context(flush_buffer_on_uncaught_error=True) + def handler(event, context): + # Log messages that should be flushed when an exception occurs + logger.debug("this log line will be flushed after error - 1") + logger.debug("this log line will be flushed after error - 2") + raise ValueError("Test error") + + # WHEN Invoking the handler and expecting a ValueError + with pytest.raises(ValueError): + handler({}, lambda_context) + + # THEN Verify that buffered log messages are flushed before the exception + log = capture_multiple_logging_statements_output(stdout) + assert len(log) == 2, "Expected two log messages to be flushed" + assert log[0]["message"] == "this log line will be flushed after error - 1" + assert log[1]["message"] == "this log line will be flushed after error - 2" + + +def test_logger_buffer_not_flush_on_uncaught_exception(stdout, service_name, monkeypatch, lambda_context): + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + + # GIVEN: A logger configured with a large buffer and error-based flushing + logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=True) + logger = Logger(level="DEBUG", service=service_name, stream=stdout, 
logger_buffer=logger_buffer_config)
+
+    @logger.inject_lambda_context(flush_buffer_on_uncaught_error=False)
+    def handler(event, context):
+        # Log messages that should stay buffered even when an exception occurs
+        logger.debug("this log line will be flushed after error - 1")
+        logger.debug("this log line will be flushed after error - 2")
+        raise ValueError("Test error")
+
+    # WHEN Invoking the handler and expecting a ValueError
+    with pytest.raises(ValueError):
+        handler({}, lambda_context)
+
+    # THEN Verify that buffered log messages are not flushed after the exception
+    log = capture_multiple_logging_statements_output(stdout)
+    assert len(log) == 0

From c9779049a3fed377473ac8cb9754239d27d0ee34 Mon Sep 17 00:00:00 2001
From: Leandro Damascena
Date: Sat, 1 Mar 2025 14:58:45 +0000
Subject: [PATCH 22/40] Refactoring variable names

---
 .../logging/buffer/functions.py         |  2 +-
 aws_lambda_powertools/logging/logger.py | 37 +++++++++----------
 2 files changed, 19 insertions(+), 20 deletions(-)

diff --git a/aws_lambda_powertools/logging/buffer/functions.py b/aws_lambda_powertools/logging/buffer/functions.py
index f8b0be4c66d..c0581a0cb13 100644
--- a/aws_lambda_powertools/logging/buffer/functions.py
+++ b/aws_lambda_powertools/logging/buffer/functions.py
@@ -11,7 +11,7 @@ def _create_buffer_record(
     args: object,
     extra: Mapping[str, object] | None = None,
 ) -> dict[str, Any]:
-    caller_frame = inspect.stack()[2]
+    caller_frame = inspect.stack()[3]
     timestamp = time.time()
 
     return {
diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py
index a64837d9d03..cc0adede08b 100644
--- a/aws_lambda_powertools/logging/logger.py
+++ b/aws_lambda_powertools/logging/logger.py
@@ -269,9 +269,9 @@ def __init__(
             "serialize_stacktrace": serialize_stacktrace,
         }
 
-        self._logger_buffer = logger_buffer
-        if self._logger_buffer:
-            self._buffer_cache = LoggerBufferCache(max_size_bytes=self._logger_buffer.max_size)
+        self.buffer_config = logger_buffer
+        if self.buffer_config:
+            self.buffer_cache = LoggerBufferCache(max_size_bytes=self.buffer_config.max_size)
 
         self._init_logger(formatter_options=formatter_options, log_level=level, **kwargs)
 
@@ -513,14 +513,13 @@ def decorate(event, context, *args, **kwargs):
                 # Execute the Lambda handler with provided event and context
                 return lambda_handler(event, context, *args, **kwargs)
             except:
-                # Re-raise any exceptions that occur during handler execution
-                raise
-            finally:
                 # Flush the log buffer if configured to do so on uncaught errors
                 # Ensures logging state is cleaned up even if an exception is raised
                 if flush_buffer_on_uncaught_error:
-                    logger.debug("An error happened, flushing the buffer")
+                    logger.debug("Uncaught error detected, flushing log buffer before exit")
                     self.flush_buffer()
+                # Re-raise any exceptions that occur during handler execution
+                raise
 
         return decorate
 
@@ -547,7 +546,7 @@ def debug(
         # Also for clarity over complexity
 
         # Buffer is not active and we need to log immediately
-        if not self._logger_buffer:
+        if not self.buffer_config:
             return self._logger.debug(
                 msg,
                 *args,
@@ -590,7 +589,7 @@ def info(
         # Also for clarity over complexity
 
         # Buffer is not active and we need to log immediately
-        if not self._logger_buffer:
+        if not self.buffer_config:
             return self._logger.info(
                 msg,
                 *args,
@@ -601,7 +600,7 @@ def info(
             )
 
         # Bypass buffer when log severity meets or exceeds configured minimum
-        if _check_minimum_buffer_log_level(self._logger_buffer.minimum_log_level, "INFO"):
+        if _check_minimum_buffer_log_level(self.buffer_config.minimum_log_level, 
"INFO"): return self._logger.info( msg, *args, @@ -644,7 +643,7 @@ def warning( # Also for clarity over complexity # Buffer is not active and we need to log immediately - if not self._logger_buffer: + if not self.buffer_config: return self._logger.warning( msg, *args, @@ -655,7 +654,7 @@ def warning( ) # Bypass buffer when log severity meets or exceeds configured minimum - if _check_minimum_buffer_log_level(self._logger_buffer.minimum_log_level, "WARNING"): + if _check_minimum_buffer_log_level(self.buffer_config.minimum_log_level, "WARNING"): return self._logger.warning( msg, *args, @@ -693,7 +692,7 @@ def error( # 2. If auto-flush enabled, trigger complete buffer processing # 3. Error log is not "bufferable", so ensure error log is immediately available - if self._logger_buffer and self._logger_buffer.flush_on_error: + if self.buffer_config and self.buffer_config.flush_on_error: self.flush_buffer() return self._logger.error( @@ -722,7 +721,7 @@ def critical( # 1. Buffer configuration checked for immediate flush # 2. If auto-flush enabled, trigger complete buffer processing # 3. Critical log is not "bufferable", so ensure error log is immediately available - if self._logger_buffer and self._logger_buffer.flush_on_error: + if self.buffer_config and self.buffer_config.flush_on_error: self.flush_buffer() return self._logger.critical( @@ -751,7 +750,7 @@ def exception( # 1. Buffer configuration checked for immediate flush # 2. If auto-flush enabled, trigger complete buffer processing # 3. Exception log is not "bufferable", so ensure error log is immediately available - if self._logger_buffer and self._logger_buffer.flush_on_error: + if self.buffer_config and self.buffer_config.flush_on_error: self.flush_buffer() return self._logger.exception( @@ -1102,7 +1101,7 @@ def _add_log_record_to_buffer( if tracer_id: log_record: dict[str, Any] = _create_buffer_record(level=level, msg=msg, args=args, extra=extra) - self._buffer_cache.add(tracer_id, log_record) + self.buffer_cache.add(tracer_id, log_record) def flush_buffer(self) -> None: """ @@ -1127,7 +1126,7 @@ def flush_buffer(self) -> None: return # is buffer empty? return - buffer = self._buffer_cache.get(tracer_id) + buffer = self.buffer_cache.get(tracer_id) if not buffer: return @@ -1136,7 +1135,7 @@ def flush_buffer(self) -> None: self._create_and_flush_log_record(log_line) # Has items evicted? - if self._buffer_cache.has_items_evicted(tracer_id): + if self.buffer_cache.has_items_evicted(tracer_id): warnings.warn( message="Some logs are not displayed because they were evicted from the buffer. 
" "Increase buffer size to store more logs in the buffer", @@ -1145,7 +1144,7 @@ def flush_buffer(self) -> None: ) # Clear the entire cache - self._buffer_cache.clear() + self.buffer_cache.clear() def set_package_logger( From c9adbee9ba2e8087c1e0b9103241a6df4b4a7084 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Sat, 1 Mar 2025 23:39:20 +0000 Subject: [PATCH 23/40] Adding more tests + propagating buffer configuration --- aws_lambda_powertools/logging/logger.py | 23 ++++- .../test_powertools_logger_buffer.py | 97 ++++++++++++++++--- 2 files changed, 103 insertions(+), 17 deletions(-) diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index cc0adede08b..5a3f607535e 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -273,7 +273,13 @@ def __init__( if self.buffer_config: self.buffer_cache = LoggerBufferCache(max_size_bytes=self.buffer_config.max_size) - self._init_logger(formatter_options=formatter_options, log_level=level, **kwargs) + self._init_logger( + formatter_options=formatter_options, + log_level=level, + buffer_config=self.buffer_config, + buffer_cache=getattr(self, "buffer_cache", None), + **kwargs, + ) if self.log_uncaught_exceptions: logger.debug("Replacing exception hook") @@ -317,6 +323,8 @@ def _init_logger( self, formatter_options: dict | None = None, log_level: str | int | None = None, + buffer_config: LoggerBufferConfig | None = None, + buffer_cache: LoggerBufferCache | None = None, **kwargs, ) -> None: """Configures new logger""" @@ -332,6 +340,11 @@ def _init_logger( return if is_logger_preconfigured: + # Reuse existing buffer configuration from a previously configured logger + # Ensures consistent buffer settings across logger instances within the same service + # Enables buffer propagation and maintains a unified logging configuration + self.buffer_config = self._logger.powertools_buffer_config # type: ignore[attr-defined] + self.buffer_cache = self._logger.powertools_buffer_cache # type: ignore[attr-defined] return self.setLevel(log_level) @@ -356,6 +369,8 @@ def _init_logger( logger.debug(f"Marking logger {self.service} as preconfigured") self._logger.init = True # type: ignore[attr-defined] self._logger.powertools_handler = self.logger_handler # type: ignore[attr-defined] + self._logger.powertools_buffer_config = buffer_config # type: ignore[attr-defined] + self._logger.powertools_buffer_cache = buffer_cache # type: ignore[attr-defined] def refresh_sample_rate_calculation(self) -> None: """ @@ -721,6 +736,7 @@ def critical( # 1. Buffer configuration checked for immediate flush # 2. If auto-flush enabled, trigger complete buffer processing # 3. 
Critical log is not "bufferable", so ensure error log is immediately available + if self.buffer_config and self.buffer_config.flush_on_error: self.flush_buffer() @@ -1100,6 +1116,11 @@ def _add_log_record_to_buffer( tracer_id = get_tracer_id() if tracer_id: + if not self.buffer_cache.get(tracer_id): + # Detect new Lambda invocation context and reset buffer to maintain log isolation + # Ensures logs from previous invocations do not leak into current execution + self.buffer_cache.clear() + log_record: dict[str, Any] = _create_buffer_record(level=level, msg=msg, args=args, extra=extra) self.buffer_cache.add(tracer_id, log_record) diff --git a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py index 69083aec9e4..519a5c694a2 100644 --- a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py +++ b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py @@ -47,7 +47,9 @@ def capture_multiple_logging_statements_output(stdout): @pytest.mark.parametrize("log_level", ["DEBUG", "WARNING", "INFO"]) -def test_logger_buffer_with_minimum_level_warning(log_level, stdout, service_name): +def test_logger_buffer_with_minimum_level_warning(log_level, stdout, service_name, monkeypatch): + + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a buffer and minimum log level set to WARNING logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="WARNING") @@ -100,7 +102,7 @@ def test_logger_buffer_is_never_buffered_with_error(stdout, service_name): @pytest.mark.parametrize("log_level", ["CRITICAL", "ERROR"]) def test_logger_buffer_is_flushed_when_an_error_happens(stdout, service_name, log_level, monkeypatch): - monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with buffer and automatic error-based flushing logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=True) @@ -129,7 +131,7 @@ def test_logger_buffer_is_flushed_when_an_error_happens(stdout, service_name, lo @pytest.mark.parametrize("log_level", ["CRITICAL", "ERROR"]) def test_logger_buffer_is_not_flushed_when_an_error_happens(stdout, service_name, log_level, monkeypatch): - monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a buffer and error flushing disabled logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=False) @@ -157,10 +159,10 @@ def test_logger_buffer_is_not_flushed_when_an_error_happens(stdout, service_name def test_create_and_flush_logs(stdout, service_name, monkeypatch): - monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a large buffer - logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=True) + logger_buffer_config = LoggerBufferConfig(max_size=10240) logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) # WHEN Logging a message and then flushing the buffer @@ -172,12 +174,27 @@ def test_create_and_flush_logs(stdout, service_name, 
monkeypatch): assert "this log line will be flushed" == log[0]["message"] +def test_ensure_log_location_after_flush_buffer(stdout, service_name, monkeypatch): + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") + + # GIVEN A logger configured with a sufficiently large buffer + logger_buffer_config = LoggerBufferConfig(max_size=10240) + logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + + # WHEN Logging a debug message and immediately flushing the buffer + logger.debug("this log line will be flushed") + logger.flush_buffer() + + # THEN Validate that the log location is precisely captured + log = capture_multiple_logging_statements_output(stdout) + assert "test_ensure_log_location_after_flush_buffer:184" in log[0]["location"] + + def test_create_buffer_with_items_evicted(stdout, service_name, monkeypatch): - monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a 1024-byte buffer logger_buffer_config = LoggerBufferConfig(max_size=1024, minimum_log_level="DEBUG") - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) # WHEN Adding multiple log entries that exceed buffer size @@ -193,11 +210,10 @@ def test_create_buffer_with_items_evicted(stdout, service_name, monkeypatch): def test_create_buffer_with_items_evicted_with_next_invocation(stdout, service_name, monkeypatch): - monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a 1024-byte buffer logger_buffer_config = LoggerBufferConfig(max_size=1024, minimum_log_level="DEBUG") - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) # WHEN Adding multiple log entries that exceed buffer size @@ -224,7 +240,7 @@ def test_create_buffer_with_items_evicted_with_next_invocation(stdout, service_n def test_flush_buffer_when_empty(stdout, service_name, monkeypatch): - monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN: A logger configured with a 1024-byte buffer logger_buffer_config = LoggerBufferConfig(max_size=1024, minimum_log_level="DEBUG") @@ -240,7 +256,7 @@ def test_flush_buffer_when_empty(stdout, service_name, monkeypatch): def test_log_record_exceeding_buffer_size(stdout, service_name, monkeypatch): - monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234") + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a small 10-byte buffer logger_buffer_config = LoggerBufferConfig(max_size=10, minimum_log_level="DEBUG") @@ -254,7 +270,9 @@ def test_log_record_exceeding_buffer_size(stdout, service_name, monkeypatch): @pytest.mark.parametrize("log_level", ["WARNING", "INFO"]) -def test_logger_buffer_log_output_for_levels_above_minimum(log_level, stdout, service_name): +def test_logger_buffer_log_output_for_levels_above_minimum(log_level, stdout, service_name, monkeypatch): + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") + # GIVEN A logger configured with a buffer and minimum log level set to DEBUG logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG") logger = Logger(level=log_level, 
service=service_name, stream=stdout, logger_buffer=logger_buffer_config)
@@ -276,10 +294,10 @@ def test_logger_buffer_log_output_for_levels_above_minimum(log_level, stdout, se
 
 
 def test_logger_buffer_flush_on_uncaught_exception(stdout, service_name, monkeypatch, lambda_context):
-    monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234")
+    monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f")
 
     # GIVEN: A logger configured with a large buffer and error-based flushing
-    logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=True)
+    logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG")
     logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config)
 
     @logger.inject_lambda_context(flush_buffer_on_uncaught_error=True)
@@ -301,10 +319,10 @@ def handler(event, context):
 
 
 def test_logger_buffer_not_flush_on_uncaught_exception(stdout, service_name, monkeypatch, lambda_context):
-    monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1234")
+    monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f")
 
     # GIVEN: A logger configured with a large buffer and error-based flushing
-    logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=True)
+    logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG")
     logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config)
 
     @logger.inject_lambda_context(flush_buffer_on_uncaught_error=False)
@@ -321,3 +339,50 @@ def handler(event, context):
     # THEN Verify that buffered log messages are not flushed after the exception
     log = capture_multiple_logging_statements_output(stdout)
     assert len(log) == 0
+
+
+def test_buffer_configuration_propagation_across_logger_instances(stdout, service_name, monkeypatch):
+    monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f")
+
+    # GIVEN A logger configured with specific buffer settings
+    logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG")
+
+    # Create primary logger with explicit buffer configuration
+    primary_logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config)
+
+    # Create secondary logger for the same service (should inherit buffer config)
+    secondary_logger = Logger(level="DEBUG", service=service_name)
+
+    # WHEN Logging messages and flushing the buffer
+    primary_logger.debug("Log message from primary logger")
+    secondary_logger.debug("Log message from secondary logger")
+    primary_logger.flush_buffer()
+
+    # THEN Verify log messages are correctly captured and output
+    log = capture_multiple_logging_statements_output(stdout)
+
+    assert "Log message from primary logger" == log[0]["message"]
+    assert "Log message from secondary logger" == log[1]["message"]
+    assert primary_logger._logger.powertools_buffer_config == secondary_logger._logger.powertools_buffer_config
+
+
+def test_logger_buffer_is_cleared_between_lambda_invocations(stdout, service_name, monkeypatch, lambda_context):
+    # Set initial trace ID for first Lambda invocation
+    monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f")
+
+    # GIVEN A logger configured with specific buffer parameters
+    logger_buffer_config = LoggerBufferConfig(max_size=10240)
+    logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config)
+
+    def handler(event, context):
+        logger.debug("debug line")
+
+    # WHEN First Lambda invocation with initial trace ID
+    handler({}, lambda_context)
+
+    # WHEN New Lambda invocation arrives with different trace ID
+    monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "2-ABC39786-5908a82a246fb67f3089263f")
+    handler({}, lambda_context)
+
+    # THEN Verify buffer for the original trace ID is cleared
+    assert not logger.buffer_cache.get("1-67c39786-5908a82a246fb67f3089263f")

From c9adbee9ba2e8087c1e0b9103241a6df4b4a7084 Mon Sep 17 00:00:00 2001
From: Leandro Damascena
Date: Sun, 2 Mar 2025 00:18:06 +0000
Subject: [PATCH 24/40] Adding exception fields

---
 .../logging/buffer/functions.py         | 87 ++++++++++++++++++-
 aws_lambda_powertools/logging/logger.py | 11 ++-
 .../test_powertools_logger_buffer.py    | 28 +++++-
 3 files changed, 120 insertions(+), 6 deletions(-)

diff --git a/aws_lambda_powertools/logging/buffer/functions.py b/aws_lambda_powertools/logging/buffer/functions.py
index c0581a0cb13..1bbd2814fd9 100644
--- a/aws_lambda_powertools/logging/buffer/functions.py
+++ b/aws_lambda_powertools/logging/buffer/functions.py
@@ -1,19 +1,63 @@
 from __future__ import annotations
 
 import inspect
+import sys
 import time
-from typing import Any, Mapping
+from typing import TYPE_CHECKING, Any, Mapping
+
+if TYPE_CHECKING:
+    import logging
 
 
 def _create_buffer_record(
     level: int,
     msg: object,
     args: object,
+    exc_info: logging._ExcInfoType = None,
+    stack_info: bool = False,
     extra: Mapping[str, object] | None = None,
 ) -> dict[str, Any]:
+    """
+    Create a structured log record to be stored in the buffer.
+
+    Parameters
+    ----------
+    level : int
+        Logging level (e.g., logging.DEBUG, logging.INFO) indicating log severity.
+    msg : object
+        The log message to be recorded.
+    args : object
+        Additional arguments associated with the log message.
+    exc_info : logging._ExcInfoType, optional
+        Exception information to be included in the log record.
+        If None, no exception details will be captured.
+    stack_info : bool, default False
+        Flag to include stack trace information in the log record.
+    extra : Mapping[str, object], optional
+        Additional context or metadata to be attached to the log record.
+
+    Returns
+    -------
+    dict[str, Any]
+
+    Notes
+    -----
+    - Captures caller frame information for precise log source tracking
+    - Automatically handles exception context
+    """
+    # Retrieve the caller's frame information to capture precise log context
+    # Uses inspect.stack() with index 3 to get the original caller's details
     caller_frame = inspect.stack()[3]
+
+    # Get the current timestamp
     timestamp = time.time()
 
+    # Dynamically replace exc_info with current system exception information
+    # This ensures the most recent exception is captured if available
+    if exc_info:
+        exc_info = sys.exc_info()
+
+    # Construct and return the log record dictionary
     return {
         "level": level,
         "msg": msg,
@@ -23,11 +67,47 @@ def _create_buffer_record(
         "function": caller_frame.function,
         "extra": extra,
         "timestamp": timestamp,
+        "exc_info": exc_info,
+        "stack_info": stack_info,
     }
 
 
 def _check_minimum_buffer_log_level(buffer_log_level, current_log_level):
-    # Define log level mapping
+    """
+    Determine if the current log level meets or exceeds the buffer's minimum log level.
+
+    Compares log levels to decide whether a log message should be included in the buffer.
+
+    Parameters
+    ----------
+    buffer_log_level : str
+        Minimum log level configured for the buffer.
+        Must be one of: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'.
+    current_log_level : str
+        Log level of the current log message.
+        Must be one of: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'.
+
+    Returns
+    -------
+    bool
+        True if the current log level is higher (less verbose) than the buffer's
+        minimum log level, indicating the record should bypass the buffer and be
+        emitted immediately. False if the record should be buffered.
+
+    Notes
+    -----
+    - Log levels are compared based on their numeric severity
+    - Conversion to uppercase ensures case-insensitive comparisons
+
+    Examples
+    --------
+    >>> _check_minimum_buffer_log_level('INFO', 'DEBUG')
+    False
+    >>> _check_minimum_buffer_log_level('ERROR', 'WARNING')
+    False
+    """
+    # Predefined log level mapping with numeric severity values
+    # Lower values indicate more verbose logging levels
     log_levels = {
         "DEBUG": 10,
         "INFO": 20,
@@ -36,7 +116,8 @@ def _check_minimum_buffer_log_level(buffer_log_level, current_log_level):
         "CRITICAL": 50,
     }
 
-    # Convert string levels to numeric if needed
+    # Normalize input log levels to uppercase for consistent comparison
+    # Retrieve corresponding numeric log level values
    buffer_level_num = log_levels.get(buffer_log_level.upper())
     current_level_num = log_levels.get(current_log_level.upper())
 
diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py
index 5a3f607535e..aeb1b54457f 100644
--- a/aws_lambda_powertools/logging/logger.py
+++ b/aws_lambda_powertools/logging/logger.py
@@ -1068,7 +1068,7 @@ def _create_and_flush_log_record(self, log_line: dict) -> None:
             lno=log_line["line"],
             msg=log_line["msg"],
             args=(),
-            exc_info=None,
+            exc_info=log_line["exc_info"],
             func=log_line["function"],
             extra=log_line["extra"],
         )
@@ -1121,7 +1121,14 @@ def _add_log_record_to_buffer(
             # Ensures logs from previous invocations do not leak into current execution
             self.buffer_cache.clear()
 
-        log_record: dict[str, Any] = _create_buffer_record(level=level, msg=msg, args=args, extra=extra)
+        log_record: dict[str, Any] = _create_buffer_record(
+            level=level,
+            msg=msg,
+            args=args,
+            exc_info=exc_info,
+            stack_info=stack_info,
+            extra=extra,
+        )
         self.buffer_cache.add(tracer_id, log_record)
 
     def flush_buffer(self) -> None:
diff --git a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py
index 519a5c694a2..e8d119feb5f 100644
--- a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py
+++ b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py
@@ -187,7 +187,33 @@ def test_ensure_log_location_after_flush_buffer(stdout, service_name, monkeypatc
 
     # THEN Validate that the log location is precisely captured
     log = capture_multiple_logging_statements_output(stdout)
-    assert "test_ensure_log_location_after_flush_buffer:184" in log[0]["location"]
+    assert "test_ensure_log_location_after_flush_buffer" in log[0]["location"]
+
+
+def test_exception_logging_during_buffer_flush(stdout, service_name, monkeypatch):
+    monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f")
+
+    # GIVEN A logger configured with a sufficiently large buffer
+    logger_buffer_config = LoggerBufferConfig(max_size=10240)
+    logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config)
+
+    # Custom exception class
+    class MyError(BaseException):
+        pass
+
+    # WHEN Logging an exception and flushing the buffer
+    try:
+        raise MyError("Test exception message")
+    except 
MyError as error: + logger.debug("Logging a test exception to verify buffer and exception handling", exc_info=error) + + logger.flush_buffer() + + # THEN Validate that the log exception fields + log = capture_multiple_logging_statements_output(stdout) + assert log[0]["exception_name"] == "MyError" + assert "Test exception message" in log[0]["exception"] + assert "test_exception_logging_during_buffer_flush" in log[0]["exception"] def test_create_buffer_with_items_evicted(stdout, service_name, monkeypatch): From 49f308ba8a753c50f852ef4c0ef57a5fb570f500 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Sun, 2 Mar 2025 00:33:56 +0000 Subject: [PATCH 25/40] Adding e2e tests --- .../logger/handlers/buffer_logs_with_flush.py | 14 +++++ .../handlers/buffer_logs_without_flush.py | 13 ++++ tests/e2e/logger/test_logger.py | 62 +++++++++++++++++++ 3 files changed, 89 insertions(+) create mode 100644 tests/e2e/logger/handlers/buffer_logs_with_flush.py create mode 100644 tests/e2e/logger/handlers/buffer_logs_without_flush.py diff --git a/tests/e2e/logger/handlers/buffer_logs_with_flush.py b/tests/e2e/logger/handlers/buffer_logs_with_flush.py new file mode 100644 index 00000000000..8845b14fc45 --- /dev/null +++ b/tests/e2e/logger/handlers/buffer_logs_with_flush.py @@ -0,0 +1,14 @@ +from aws_lambda_powertools import Logger +from aws_lambda_powertools.logging.buffer import LoggerBufferConfig + +logger_buffer_config = LoggerBufferConfig(max_size=10240) + +logger = Logger(level="INFO", logger_buffer=logger_buffer_config) + + +def lambda_handler(event, context): + message_visible, message_buffered = event.get("message_visible", ""), event.get("message_buffered", {}) + logger.info(message_visible) + logger.debug(message_buffered) + logger.flush_buffer() + return "success" diff --git a/tests/e2e/logger/handlers/buffer_logs_without_flush.py b/tests/e2e/logger/handlers/buffer_logs_without_flush.py new file mode 100644 index 00000000000..9565d0234f3 --- /dev/null +++ b/tests/e2e/logger/handlers/buffer_logs_without_flush.py @@ -0,0 +1,13 @@ +from aws_lambda_powertools import Logger +from aws_lambda_powertools.logging.buffer import LoggerBufferConfig + +logger_buffer_config = LoggerBufferConfig(max_size=10240) + +logger = Logger(level="INFO", logger_buffer=logger_buffer_config) + + +def lambda_handler(event, context): + message_visible, message_buffered = event.get("message_visible", ""), event.get("message_buffered", {}) + logger.info(message_visible) + logger.debug(message_buffered) + return "success" diff --git a/tests/e2e/logger/test_logger.py b/tests/e2e/logger/test_logger.py index 2a9ab47f559..94fa40026b5 100644 --- a/tests/e2e/logger/test_logger.py +++ b/tests/e2e/logger/test_logger.py @@ -39,6 +39,68 @@ def multiple_logger_instances_arn(infrastructure: dict) -> str: return infrastructure.get("MultipleLoggerInstancesArn", "") +@pytest.fixture +def buffer_logs_without_flush_fn(infrastructure: dict) -> str: + return infrastructure.get("BufferLogsWithoutFlush", "") + + +@pytest.fixture +def buffer_logs_without_flush_arn(infrastructure: dict) -> str: + return infrastructure.get("BufferLogsWithoutFlushArn", "") + + +@pytest.fixture +def buffer_logs_with_flush_fn(infrastructure: dict) -> str: + return infrastructure.get("BufferLogsWithFlush", "") + + +@pytest.fixture +def buffer_logs_with_flush_arn(infrastructure: dict) -> str: + return infrastructure.get("BufferLogsWithFlushArn", "") + + +@pytest.mark.xdist_group(name="logger") +def test_buffer_logs_without_flush(buffer_logs_without_flush_fn, 
buffer_logs_without_flush_arn):
+    # GIVEN
+    message = "logs should be visible with default settings"
+    message_buffer = "not visible message"
+    payload = json.dumps({"message_visible": message, "message_buffered": message_buffer})
+
+    # WHEN
+    _, execution_time = data_fetcher.get_lambda_response(lambda_arn=buffer_logs_without_flush_arn, payload=payload)
+    data_fetcher.get_lambda_response(lambda_arn=buffer_logs_without_flush_arn, payload=payload)
+
+    # THEN
+    logs = data_fetcher.get_logs(
+        function_name=buffer_logs_without_flush_fn,
+        start_time=execution_time,
+        minimum_log_entries=2,
+    )
+
+    assert len(logs) == 2
+
+
+@pytest.mark.xdist_group(name="logger")
+def test_buffer_logs_with_flush(buffer_logs_with_flush_fn, buffer_logs_with_flush_arn):
+    # GIVEN
+    message = "logs should be visible with default settings"
+    message_buffer = "not visible message"
+    payload = json.dumps({"message_visible": message, "message_buffered": message_buffer})
+
+    # WHEN
+    _, execution_time = data_fetcher.get_lambda_response(lambda_arn=buffer_logs_with_flush_arn, payload=payload)
+    data_fetcher.get_lambda_response(lambda_arn=buffer_logs_with_flush_arn, payload=payload)
+
+    # THEN
+    logs = data_fetcher.get_logs(
+        function_name=buffer_logs_with_flush_fn,
+        start_time=execution_time,
+        minimum_log_entries=4,
+    )
+
+    assert len(logs) == 4
+
+
 @pytest.mark.xdist_group(name="logger")
 def test_basic_lambda_logs_visible(basic_handler_fn, basic_handler_fn_arn):
     # GIVEN

From b1d81b5e671660854ccec5983127fbf6e3cf2ba3 Mon Sep 17 00:00:00 2001
From: Leandro Damascena
Date: Sun, 2 Mar 2025 22:38:21 +0000
Subject: [PATCH 26/40] sys._getframe is more performant

---
 aws_lambda_powertools/logging/buffer/functions.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/aws_lambda_powertools/logging/buffer/functions.py b/aws_lambda_powertools/logging/buffer/functions.py
index 1bbd2814fd9..cc266354e91 100644
--- a/aws_lambda_powertools/logging/buffer/functions.py
+++ b/aws_lambda_powertools/logging/buffer/functions.py
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-import inspect
 import sys
 import time
 from typing import TYPE_CHECKING, Any, Mapping
@@ -47,7 +46,7 @@ def _create_buffer_record(
     """
     # Retrieve the caller's frame information to capture precise log context
-    # Uses inspect.stack() with index 3 to get the original caller's details
-    caller_frame = inspect.stack()[3]
+    # Uses sys._getframe(3) to reach the original caller's frame without building a full stack
+    caller_frame = sys._getframe(3)
 
     # Get the current timestamp
     timestamp = time.time()
@@ -62,9 +61,9 @@ def _create_buffer_record(
         "level": level,
         "msg": msg,
         "args": args,
-        "filename": caller_frame.filename,
-        "line": caller_frame.lineno,
-        "function": caller_frame.function,
+        "filename": caller_frame.f_code.co_filename,
+        "line": caller_frame.f_lineno,
+        "function": caller_frame.f_code.co_name,
         "extra": extra,
         "timestamp": timestamp,
         "exc_info": exc_info,

From feaea2b607cabab2f9c0afa4bf8e1492e0939783 Mon Sep 17 00:00:00 2001
From: Leandro Damascena
Date: Mon, 3 Mar 2025 10:50:27 +0000
Subject: [PATCH 27/40] Changing the max size

---
 aws_lambda_powertools/logging/buffer/config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/aws_lambda_powertools/logging/buffer/config.py b/aws_lambda_powertools/logging/buffer/config.py
index f7875c1838d..a9dc123d5f9 100644
--- a/aws_lambda_powertools/logging/buffer/config.py
+++ b/aws_lambda_powertools/logging/buffer/config.py
@@ -14,7 +14,7 @@ class LoggerBufferConfig:
 
     def __init__(
         self,
-        max_size: int = 10240,
+        max_size: int = 20480,
         minimum_log_level: LOG_LEVEL_BUFFER_VALUES = "DEBUG",
         flush_on_error: bool = True,
         compress: bool = False,

From a46e2ff5d7ce054d975c0d8b1b6620a3ca5bcffa Mon Sep 17 00:00:00 2001
From: Leandro Damascena
Date: Mon, 3 Mar 2025 13:24:33 +0000
Subject: [PATCH 28/40] Removing unnecessary files

---
 tests/performance/test_zlib_shared_functions.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 tests/performance/test_zlib_shared_functions.py

diff --git a/tests/performance/test_zlib_shared_functions.py b/tests/performance/test_zlib_shared_functions.py
deleted file mode 100644
index e69de29bb2d..00000000000

From 9368459e1f4bb30ddc6e16747d1e2e67d034c0ad Mon Sep 17 00:00:00 2001
From: Leandro Damascena
Date: Mon, 3 Mar 2025 13:29:45 +0000
Subject: [PATCH 29/40] Fix tests

---
 .../required_dependencies/test_logger_buffer_config.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/unit/logger/required_dependencies/test_logger_buffer_config.py b/tests/unit/logger/required_dependencies/test_logger_buffer_config.py
index 7a107b7d008..09881363495 100644
--- a/tests/unit/logger/required_dependencies/test_logger_buffer_config.py
+++ b/tests/unit/logger/required_dependencies/test_logger_buffer_config.py
@@ -8,7 +8,7 @@ def test_default_configuration():
     config_buffer = LoggerBufferConfig()
 
     # THEN default values are default
-    assert config_buffer.max_size == 10240
+    assert config_buffer.max_size == 20480
     assert config_buffer.minimum_log_level == "DEBUG"
     assert config_buffer.flush_on_error is True
     assert config_buffer.compress is False
@@ -17,14 +17,14 @@ def test_custom_configuration():
     # GIVEN a new LoggerBufferConfig with custom configuration parameters
     config_buffer = LoggerBufferConfig(
-        max_size=20480,
+        max_size=51200,
         minimum_log_level="WARNING",
         flush_on_error=False,
         compress=True,
     )
 
     # THEN configuration is set with provided values
-    assert config_buffer.max_size == 20480
+    assert config_buffer.max_size == 51200
     assert config_buffer.minimum_log_level == "WARNING"
     assert config_buffer.flush_on_error is False
     assert config_buffer.compress is True

From f9db4cd4db633a2542345690c3d891e9f3839112 Mon Sep 17 00:00:00 2001
From: Leandro Damascena
Date: Wed, 5 Mar 2025 14:38:14 +0000
Subject: [PATCH 30/40] Flushing logs when log line is bigger than buffer size

---
 aws_lambda_powertools/logging/buffer/cache.py |  11 +---
 aws_lambda_powertools/logging/logger.py       |  56 +++++++++++--------
 .../test_powertools_logger_buffer.py          |  15 +++--
 .../test_logger_buffer_cache.py               |   3 +-
 4 files changed, 45 insertions(+), 40 deletions(-)

diff --git a/aws_lambda_powertools/logging/buffer/cache.py b/aws_lambda_powertools/logging/buffer/cache.py
index 3bfbb573fe6..728147b852e 100644
--- a/aws_lambda_powertools/logging/buffer/cache.py
+++ b/aws_lambda_powertools/logging/buffer/cache.py
@@ -1,11 +1,8 @@
 from __future__ import annotations
 
-import warnings
 from collections import deque
 from typing import Any
 
-from aws_lambda_powertools.warnings import PowertoolsUserWarning
-
 
 class KeyBufferCache:
     """
@@ -136,13 +133,7 @@ def add(self, key: str, item: Any) -> None:
         # Check if item is larger than entire buffer
         item_size = len(str(item))
         if item_size > self.max_size_bytes:
-            warnings.warn(
-                message=f"Cannot add item to the buffer "
-                f"Item size {item_size} bytes exceeds total cache size {self.max_size_bytes} bytes",
-                category=PowertoolsUserWarning,
-                stacklevel=2,
-            )
-            return
+            raise BufferError("Cannot add item to the buffer")
 
         # Create the key's cache if it doesn't exist
         if 
key not in self.cache: diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index aeb1b54457f..4dce02d4255 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -269,15 +269,15 @@ def __init__( "serialize_stacktrace": serialize_stacktrace, } - self.buffer_config = logger_buffer - if self.buffer_config: - self.buffer_cache = LoggerBufferCache(max_size_bytes=self.buffer_config.max_size) + self._buffer_config = logger_buffer + if self._buffer_config: + self._buffer_cache = LoggerBufferCache(max_size_bytes=self._buffer_config.max_size) self._init_logger( formatter_options=formatter_options, log_level=level, - buffer_config=self.buffer_config, - buffer_cache=getattr(self, "buffer_cache", None), + buffer_config=self._buffer_config, + buffer_cache=getattr(self, "_buffer_cache", None), **kwargs, ) @@ -343,8 +343,8 @@ def _init_logger( # Reuse existing buffer configuration from a previously configured logger # Ensures consistent buffer settings across logger instances within the same service # Enables buffer propagation and maintains a unified logging configuration - self.buffer_config = self._logger.powertools_buffer_config # type: ignore[attr-defined] - self.buffer_cache = self._logger.powertools_buffer_cache # type: ignore[attr-defined] + self._buffer_config = self._logger.powertools_buffer_config # type: ignore[attr-defined] + self._buffer_cache = self._logger.powertools_buffer_cache # type: ignore[attr-defined] return self.setLevel(log_level) @@ -535,6 +535,8 @@ def decorate(event, context, *args, **kwargs): self.flush_buffer() # Re-raise any exceptions that occur during handler execution raise + finally: + self._buffer_cache.clear() return decorate @@ -561,7 +563,7 @@ def debug( # Also for clarity over complexity # Buffer is not active and we need to log immediately - if not self.buffer_config: + if not self._buffer_config: return self._logger.debug( msg, *args, @@ -604,7 +606,7 @@ def info( # Also for clarity over complexity # Buffer is not active and we need to log immediately - if not self.buffer_config: + if not self._buffer_config: return self._logger.info( msg, *args, @@ -615,7 +617,7 @@ def info( ) # Bypass buffer when log severity meets or exceeds configured minimum - if _check_minimum_buffer_log_level(self.buffer_config.minimum_log_level, "INFO"): + if _check_minimum_buffer_log_level(self._buffer_config.minimum_log_level, "INFO"): return self._logger.info( msg, *args, @@ -658,7 +660,7 @@ def warning( # Also for clarity over complexity # Buffer is not active and we need to log immediately - if not self.buffer_config: + if not self._buffer_config: return self._logger.warning( msg, *args, @@ -669,7 +671,7 @@ def warning( ) # Bypass buffer when log severity meets or exceeds configured minimum - if _check_minimum_buffer_log_level(self.buffer_config.minimum_log_level, "WARNING"): + if _check_minimum_buffer_log_level(self._buffer_config.minimum_log_level, "WARNING"): return self._logger.warning( msg, *args, @@ -707,7 +709,7 @@ def error( # 2. If auto-flush enabled, trigger complete buffer processing # 3. Error log is not "bufferable", so ensure error log is immediately available - if self.buffer_config and self.buffer_config.flush_on_error: + if self._buffer_config and self._buffer_config.flush_on_error: self.flush_buffer() return self._logger.error( @@ -737,7 +739,7 @@ def critical( # 2. If auto-flush enabled, trigger complete buffer processing # 3. 
Critical log is not "bufferable", so ensure error log is immediately available - if self.buffer_config and self.buffer_config.flush_on_error: + if self._buffer_config and self._buffer_config.flush_on_error: self.flush_buffer() return self._logger.critical( @@ -766,7 +768,7 @@ def exception( # 1. Buffer configuration checked for immediate flush # 2. If auto-flush enabled, trigger complete buffer processing # 3. Exception log is not "bufferable", so ensure error log is immediately available - if self.buffer_config and self.buffer_config.flush_on_error: + if self._buffer_config and self._buffer_config.flush_on_error: self.flush_buffer() return self._logger.exception( @@ -1116,11 +1118,6 @@ def _add_log_record_to_buffer( tracer_id = get_tracer_id() if tracer_id: - if not self.buffer_cache.get(tracer_id): - # Detect new Lambda invocation context and reset buffer to maintain log isolation - # Ensures logs from previous invocations do not leak into current execution - self.buffer_cache.clear() - log_record: dict[str, Any] = _create_buffer_record( level=level, msg=msg, @@ -1129,7 +1126,18 @@ def _add_log_record_to_buffer( stack_info=stack_info, extra=extra, ) - self.buffer_cache.add(tracer_id, log_record) + try: + self._buffer_cache.add(tracer_id, log_record) + except BufferError: + warnings.warn( + message=f"Cannot add item to the buffer. " + f"Item size exceeds total cache size {self._buffer_config.max_size} bytes", + category=PowertoolsUserWarning, + stacklevel=2, + ) + + # flush this log to avoid data loss + self._create_and_flush_log_record(log_record) def flush_buffer(self) -> None: """ @@ -1154,7 +1162,7 @@ def flush_buffer(self) -> None: return # is buffer empty? return - buffer = self.buffer_cache.get(tracer_id) + buffer = self._buffer_cache.get(tracer_id) if not buffer: return @@ -1163,7 +1171,7 @@ def flush_buffer(self) -> None: self._create_and_flush_log_record(log_line) # Has items evicted? - if self.buffer_cache.has_items_evicted(tracer_id): + if self._buffer_cache.has_items_evicted(tracer_id): warnings.warn( message="Some logs are not displayed because they were evicted from the buffer. 
" "Increase buffer size to store more logs in the buffer", @@ -1172,7 +1180,7 @@ def flush_buffer(self) -> None: ) # Clear the entire cache - self.buffer_cache.clear() + self._buffer_cache.clear() def set_package_logger( diff --git a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py index e8d119feb5f..57d25edb2d5 100644 --- a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py +++ b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py @@ -198,7 +198,7 @@ def test_exception_logging_during_buffer_flush(stdout, service_name, monkeypatch logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) # Custom exception class - class MyError(BaseException): + class MyError(Exception): pass # WHEN Logging an exception and flushing the buffer @@ -284,15 +284,21 @@ def test_flush_buffer_when_empty(stdout, service_name, monkeypatch): def test_log_record_exceeding_buffer_size(stdout, service_name, monkeypatch): monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") + message = "this log is bigger than entire buffer size" + # GIVEN A logger configured with a small 10-byte buffer logger_buffer_config = LoggerBufferConfig(max_size=10, minimum_log_level="DEBUG") logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) # WHEN Attempting to log a message larger than the entire buffer - # THE: A warning should be raised indicating buffer size limitation + # THEN A warning should be raised indicating buffer size limitation with pytest.warns(PowertoolsUserWarning, match="Cannot add item to the buffer*"): - logger.debug("this log is bigger than entire buffer size") + logger.debug(message) + + # THEN the log must be flushed to avoid data loss + log = capture_multiple_logging_statements_output(stdout) + assert log[0]["message"] == message @pytest.mark.parametrize("log_level", ["WARNING", "INFO"]) @@ -400,6 +406,7 @@ def test_logger_buffer_is_cleared_between_lambda_invocations(stdout, service_nam logger_buffer_config = LoggerBufferConfig(max_size=10240) logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + @logger.inject_lambda_context def handler(event, context): logger.debug("debug line") @@ -411,4 +418,4 @@ def handler(event, context): handler({}, lambda_context) # THEN Verify buffer for the original trace ID is cleared - assert not logger.buffer_cache.get("1-67c39786-5908a82a246fb67f3089263f") + assert not logger._buffer_cache.get("1-67c39786-5908a82a246fb67f3089263f") diff --git a/tests/unit/logger/required_dependencies/test_logger_buffer_cache.py b/tests/unit/logger/required_dependencies/test_logger_buffer_cache.py index d1385dfe5c9..00ae7696281 100644 --- a/tests/unit/logger/required_dependencies/test_logger_buffer_cache.py +++ b/tests/unit/logger/required_dependencies/test_logger_buffer_cache.py @@ -1,7 +1,6 @@ import pytest from aws_lambda_powertools.logging.buffer.cache import LoggerBufferCache -from aws_lambda_powertools.warnings import PowertoolsUserWarning def test_initialization(): @@ -61,7 +60,7 @@ def test_item_larger_than_cache(): logger_cache = LoggerBufferCache(5) # WHEN an item larger than cache is added - with pytest.warns(expected_warning=PowertoolsUserWarning, match="Item size *"): + with pytest.raises(BufferError): # THEN a warning is raised 
logger_cache.add("key1", "very_long_item") From f30eed0115b652617ef147f59e0c9d55171e0c17 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 5 Mar 2025 15:00:37 +0000 Subject: [PATCH 31/40] Flushing logs when log line is bigger than buffer size --- aws_lambda_powertools/logging/logger.py | 21 ++++++++++++++++--- .../test_powertools_logger_buffer.py | 17 +++++++++++++++ 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 4dce02d4255..57b51b4f4a2 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -536,7 +536,9 @@ def decorate(event, context, *args, **kwargs): # Re-raise any exceptions that occur during handler execution raise finally: - self._buffer_cache.clear() + # Clear the cache after invocation is complete + if self._buffer_config: + self._buffer_cache.clear() return decorate @@ -1117,7 +1119,7 @@ def _add_log_record_to_buffer( # Determine tracer ID, defaulting to first invoke marker tracer_id = get_tracer_id() - if tracer_id: + if tracer_id and self._buffer_config: log_record: dict[str, Any] = _create_buffer_record( level=level, msg=msg, @@ -1130,7 +1132,7 @@ def _add_log_record_to_buffer( self._buffer_cache.add(tracer_id, log_record) except BufferError: warnings.warn( - message=f"Cannot add item to the buffer. " + message="Cannot add item to the buffer. " f"Item size exceeds total cache size {self._buffer_config.max_size} bytes", category=PowertoolsUserWarning, stacklevel=2, @@ -1182,6 +1184,19 @@ def flush_buffer(self) -> None: # Clear the entire cache self._buffer_cache.clear() + def clear_buffer(self) -> None: + """ + Clear the internal buffer cache. + + This method removes all items from the buffer cache, effectively resetting it to an empty state. 
+
+        Returns
+        -------
+        None
+        """
+        if self._buffer_config:
+            self._buffer_cache.clear()
+
 
 def set_package_logger(
     level: str | int = logging.DEBUG,
diff --git a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py
index 57d25edb2d5..d94407b1121 100644
--- a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py
+++ b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py
@@ -190,6 +190,23 @@ def test_ensure_log_location_after_flush_buffer(stdout, service_name, monkeypatc
     assert "test_ensure_log_location_after_flush_buffer" in log[0]["location"]
 
 
+def test_clear_buffer_during_execution(stdout, service_name, monkeypatch):
+    monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f")
+
+    # GIVEN A logger configured with a sufficiently large buffer
+    logger_buffer_config = LoggerBufferConfig(max_size=10240)
+    logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config)
+
+    # WHEN we clear the buffer during execution
+    logger.debug("this log line will be flushed")
+    logger.clear_buffer()
+
+    # THEN no log is flushed
+    logger.flush_buffer()
+    log = capture_multiple_logging_statements_output(stdout)
+    assert not log
+
+
 def test_exception_logging_during_buffer_flush(stdout, service_name, monkeypatch):
     monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f")
 
From 452cf91afe712a161b46706afee28eb1ed8a0040 Mon Sep 17 00:00:00 2001
From: Leandro Damascena
Date: Wed, 5 Mar 2025 19:51:59 +0000
Subject: [PATCH 32/40] Adding documentation

---
 .../logging/buffer/config.py                  |   4 +-
 docs/core/logger.md                           | 147 ++++++++++++++++++
 .../getting_started_with_buffering_logs.py    |  15 ++
 ...ng_with_buffering_logs_different_levels.py |  16 ++
 ...ng_with_buffering_logs_disable_on_error.py |  24 +++
 ..._with_buffering_logs_uncaught_exception.py |  19 +++
 6 files changed, 222 insertions(+), 3 deletions(-)
 create mode 100644 examples/logger/src/getting_started_with_buffering_logs.py
 create mode 100644 examples/logger/src/working_with_buffering_logs_different_levels.py
 create mode 100644 examples/logger/src/working_with_buffering_logs_disable_on_error.py
 create mode 100644 examples/logger/src/working_with_buffering_logs_uncaught_exception.py

diff --git a/aws_lambda_powertools/logging/buffer/config.py b/aws_lambda_powertools/logging/buffer/config.py
index a9dc123d5f9..5716aec796d 100644
--- a/aws_lambda_powertools/logging/buffer/config.py
+++ b/aws_lambda_powertools/logging/buffer/config.py
@@ -17,7 +17,6 @@ def __init__(
         max_size: int = 20480,
         minimum_log_level: LOG_LEVEL_BUFFER_VALUES = "DEBUG",
         flush_on_error: bool = True,
-        compress: bool = False,
     ):
         """
         Initialize logger buffer configuration. 
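For orientation between these hunks: after this patch the constructor is down to three options. A minimal sketch of constructing it, using the defaults shown in the hunk above (these options are renamed to `max_bytes`, `buffer_at_verbosity`, and `flush_on_error_log` later in this series):

```python
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

# The three options that remain once `compress` is dropped
config = LoggerBufferConfig(
    max_size=20480,             # buffer capacity in bytes
    minimum_log_level="DEBUG",  # records at this severity and below are buffered
    flush_on_error=True,        # error/exception/critical logs drain the buffer first
)
```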
@@ -33,12 +32,11 @@ def __init__( compress : bool, optional Whether to compress buffered logs """ - self._validate_inputs(max_size, minimum_log_level, flush_on_error, compress) + self._validate_inputs(max_size, minimum_log_level, flush_on_error) self._max_size = max_size self._minimum_log_level = minimum_log_level.upper() self._flush_on_error = flush_on_error - self._compress = compress def _validate_inputs( self, diff --git a/docs/core/logger.md b/docs/core/logger.md index 3e19d8ebcd1..a137b4fa68c 100644 --- a/docs/core/logger.md +++ b/docs/core/logger.md @@ -514,6 +514,153 @@ The following environment variables are available to configure Logger at a globa ## Advanced +### Buffering logs + +Log buffering enables you to buffer logs for a specific request or invocation. Enable log buffering by passing `logger_buffer` when initializing a Logger instance. You can buffer logs at the `WARNING`, `INFO` or `DEBUG` level, and flush them automatically on error or manually as needed. + +!!! tip "This is useful when you want to keep logs clean while still providing detailed diagnostic information when needed." + +=== "getting_started_with_buffering_logs.py" + + ```python hl_lines="5 8" + --8<-- "examples/logger/src/getting_started_with_buffering_logs.py" + ``` + +#### Customizing Buffer configuration + +When configuring log buffering, you have options to fine-tune how logs are captured, stored, and emitted. You can configure the following parameters in the `LoggerBufferConfig` constructor: + +| Parameter | Description | Configuration | +|-------------------- |------------------------------------------------ |----------------------------- | +| `max_size` | Maximum size of the log buffer in bytes | `int` (default: 20480 bytes) | +| `minimum_log_level` | Minimum log level to buffer | `DEBUG`, `INFO`, `WARNING` | +| `flush_on_error` | Automatically flush buffer when an error occurs | `True` (default), `False` | + +=== "working_with_buffering_logs_different_levels.py" + + ```python hl_lines="5 8" + --8<-- "examples/logger/src/working_with_buffering_logs_different_levels.py" + ``` + +=== "working_with_buffering_logs_disable_on_error.py" + + ```python hl_lines="5 8" + --8<-- "examples/logger/src/working_with_buffering_logs_disable_on_error.py" + ``` + +#### Flushing on uncaught exceptions + +Use the `@logger.inject_lambda_context` decorator to automatically flush buffered logs when an uncaught exception occurs in your Lambda function. The `flush_buffer_on_uncaught_error` parameter captures and flush all buffered logs records before the Lambda execution terminates. 
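For readers scanning the diff top-down, the example file included by the tab below is added further down in this same patch. In brief, the documented pattern is the following sketch (parameter names as of this patch; the error type is illustrative):

```python
from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

logger = Logger(level="INFO", logger_buffer=LoggerBufferConfig(max_size=20480))


@logger.inject_lambda_context(flush_buffer_on_uncaught_error=True)
def lambda_handler(event: dict, context):
    logger.debug("a debug log")  # buffered, not emitted yet

    # any uncaught exception now drains the buffer before propagating
    raise RuntimeError("unexpected failure")
```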
+ +=== "working_with_buffering_logs_uncaught_exception.py" + + ```python hl_lines="5 8" + --8<-- "examples/logger/src/working_with_buffering_logs_uncaught_exception.py" + ``` + +#### Buffering workflows + +##### Flushing manually + +```mermaid +sequenceDiagram + participant Client + participant Lambda + participant Logger + participant CloudWatch + + Client->>Lambda: Invoke Lambda + Lambda->>Logger: Initialize with DEBUG level buffering + Logger-->>Lambda: Logger buffer ready + + Lambda->>Logger: logger.debug("First debug log") + Logger-->>Logger: Buffer first debug log + + Lambda->>Logger: logger.info("Info log") + Logger->>CloudWatch: Directly log info message + + Lambda->>Logger: logger.debug("Second debug log") + Logger-->>Logger: Buffer second debug log + + Lambda->>Logger: logger.flush_buffer() + Logger->>CloudWatch: Flush buffered debug logs to CloudWatch + + Lambda->>Client: Return execution result + +``` + +##### Flushing on error + +```mermaid +sequenceDiagram + participant Client + participant Lambda + participant Logger + participant CloudWatch + + Client->>Lambda: Invoke Lambda + Lambda->>Logger: Initialize with DEBUG level buffering + Logger-->>Lambda: Logger buffer ready + + Lambda->>Logger: logger.debug("First log") + Logger-->>Logger: Buffer first debug log + + Lambda->>Logger: logger.debug("Second log") + Logger-->>Logger: Buffer second debug log + + Lambda->>Logger: logger.debug("Third log") + Logger-->>Logger: Buffer third debug log + + Lambda->>Lambda: Exception occurs + Lambda->>Logger: logger.error("Error details") + Logger->>CloudWatch: Send buffered debug logs + Logger->>CloudWatch: Send error log + + Lambda->>Client: Raise exception +``` + +##### Flushing when uncaught exception happens + +```mermaid +sequenceDiagram + participant Client + participant Lambda + participant Logger + participant CloudWatch + + Client->>Lambda: Invoke Lambda + Lambda->>Logger: Using decorator @logger.inject_lambda_context(flush_buffer_on_uncaught_error=True) + Logger-->>Lambda: Logger context injected + + Lambda->>Logger: logger.debug("First log") + Logger-->>Logger: Buffer first debug log + + Lambda->>Logger: logger.debug("Second log") + Logger-->>Logger: Buffer second debug log + + Lambda->>Lambda: Uncaught Exception + Lambda->>CloudWatch: Automatically send buffered debug logs + + Lambda->>Client: Raise uncaught exception + +``` + +#### Common buffering questions + +When using log buffering to control log emissions in your AWS Lambda functions, it's important to follow best practices to avoid introducing complexity or unnecessary overhead. Keep these guidelines in mind: + +1. **How can I prevent log buffering from consuming excessive memory?** Set a `max_size` in `LoggerBufferConfig` to limit the buffer's memory footprint. + +2. **What happens if the log buffer reaches its maximum size?** The oldest logs are discarded when the buffer is full, making room for new logs. You need to set an appropriate `max_size` configuration. + +3. **Can I customize when logs are flushed?** Yes, use the `flush_on_error=True` in `LoggerBufferConfig` or use `flush_buffer_on_uncaught_error` in `@logger.inject_lambda_context` decorator. + +4. **What timestamp is used when I flush the logs?** The timestamp preserves the original time when the log record was created. If you create a log record at 11:00:10 and flush it at 11:00:25, the log line will retain its original timestamp of 11:00:10. + +5. 
**What happens if I try to add a log line that is bigger than max buffer size?** It will not buffer, but log as a normal log.
+
+6. **What happens if Lambda times out without flushing the buffer?** Buffer will be lost and no buffered logs will be flushed.
+
 ### Built-in Correlation ID expressions
 
 You can use any of the following built-in JMESPath expressions as part of [inject_lambda_context decorator](#setting-a-correlation-id).
diff --git a/examples/logger/src/getting_started_with_buffering_logs.py b/examples/logger/src/getting_started_with_buffering_logs.py
new file mode 100644
index 00000000000..60ff421edf1
--- /dev/null
+++ b/examples/logger/src/getting_started_with_buffering_logs.py
@@ -0,0 +1,15 @@
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.logging.buffer import LoggerBufferConfig
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+logger_buffer_config = LoggerBufferConfig(max_size=20480, flush_on_error=True)
+logger = Logger(level="INFO", logger_buffer=logger_buffer_config)
+
+
+def lambda_handler(event: dict, context: LambdaContext):
+    logger.debug("a debug log")  # this is buffered
+    logger.info("an info log")  # this is not buffered
+
+    # do stuff
+
+    logger.flush_buffer()
diff --git a/examples/logger/src/working_with_buffering_logs_different_levels.py b/examples/logger/src/working_with_buffering_logs_different_levels.py
new file mode 100644
index 00000000000..82b61cf370e
--- /dev/null
+++ b/examples/logger/src/working_with_buffering_logs_different_levels.py
@@ -0,0 +1,16 @@
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.logging.buffer import LoggerBufferConfig
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+logger_buffer_config = LoggerBufferConfig(max_size=20480, minimum_log_level="WARNING")
+logger = Logger(level="INFO", logger_buffer=logger_buffer_config)
+
+
+def lambda_handler(event: dict, context: LambdaContext):
+    logger.warning("a warning log")  # this is buffered
+    logger.info("an info log")  # this is buffered
+    logger.debug("a debug log")  # this is buffered
+
+    # do stuff
+
+    logger.flush_buffer()
diff --git a/examples/logger/src/working_with_buffering_logs_disable_on_error.py b/examples/logger/src/working_with_buffering_logs_disable_on_error.py
new file mode 100644
index 00000000000..8dca2dc25eb
--- /dev/null
+++ b/examples/logger/src/working_with_buffering_logs_disable_on_error.py
@@ -0,0 +1,24 @@
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.logging.buffer import LoggerBufferConfig
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+logger_buffer_config = LoggerBufferConfig(max_size=20480, flush_on_error=False)
+logger = Logger(level="INFO", logger_buffer=logger_buffer_config)
+
+
+class MyException(Exception):
+    pass
+
+
+def lambda_handler(event: dict, context: LambdaContext):
+    logger.debug("a debug log")  # this is buffered
+
+    # do stuff
+
+    try:
+        raise MyException
+    except MyException as error:
+        logger.error("An error occurred", exc_info=error)  # Logs won't be flushed here
+
+    # Need to flush logs manually
+    logger.flush_buffer()
diff --git a/examples/logger/src/working_with_buffering_logs_uncaught_exception.py b/examples/logger/src/working_with_buffering_logs_uncaught_exception.py
new file mode 100644
index 00000000000..0fdc5b81600
--- /dev/null
+++ b/examples/logger/src/working_with_buffering_logs_uncaught_exception.py
@@ -0,0 +1,19 @@
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.logging.buffer import 
LoggerBufferConfig +from aws_lambda_powertools.utilities.typing import LambdaContext + +logger_buffer_config = LoggerBufferConfig(max_size=20480, flush_on_error=False) +logger = Logger(level="INFO", logger_buffer=logger_buffer_config) + + +class MyException(Exception): + pass + + +@logger.inject_lambda_context(flush_buffer_on_uncaught_error=True) +def lambda_handler(event: dict, context: LambdaContext): + logger.debug("a debug log") # this is buffered + + # do stuff + + raise MyException # Logs will be flushed here From 2d8eb3973909ee8b516cb03b2117e87fe98d3fcc Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 5 Mar 2025 20:46:36 +0000 Subject: [PATCH 33/40] Adding documentation --- .../logging/buffer/config.py | 9 ------ docs/core/logger.md | 28 ++++++------------- .../test_logger_buffer_config.py | 13 --------- 3 files changed, 9 insertions(+), 41 deletions(-) diff --git a/aws_lambda_powertools/logging/buffer/config.py b/aws_lambda_powertools/logging/buffer/config.py index 5716aec796d..7879f1bc478 100644 --- a/aws_lambda_powertools/logging/buffer/config.py +++ b/aws_lambda_powertools/logging/buffer/config.py @@ -43,7 +43,6 @@ def _validate_inputs( max_size: int, minimum_log_level: str, flush_on_error: bool, - compress: bool, ) -> None: """ Validate configuration inputs. @@ -65,9 +64,6 @@ def _validate_inputs( if not isinstance(flush_on_error, bool): raise ValueError("flush_on_error must be a boolean") - if not isinstance(compress, bool): - raise ValueError("compress must be a boolean") - @property def max_size(self) -> int: """Maximum buffer size in bytes.""" @@ -82,8 +78,3 @@ def minimum_log_level(self) -> str: def flush_on_error(self) -> bool: """Flag to flush buffer on error.""" return self._flush_on_error - - @property - def compress(self) -> bool: - """Flag to compress buffered logs.""" - return self._compress diff --git a/docs/core/logger.md b/docs/core/logger.md index a137b4fa68c..f3b723522f7 100644 --- a/docs/core/logger.md +++ b/docs/core/logger.md @@ -562,88 +562,78 @@ Use the `@logger.inject_lambda_context` decorator to automatically flush buffere ##### Flushing manually +
```mermaid sequenceDiagram participant Client participant Lambda participant Logger participant CloudWatch - Client->>Lambda: Invoke Lambda Lambda->>Logger: Initialize with DEBUG level buffering Logger-->>Lambda: Logger buffer ready - Lambda->>Logger: logger.debug("First debug log") Logger-->>Logger: Buffer first debug log - Lambda->>Logger: logger.info("Info log") Logger->>CloudWatch: Directly log info message - Lambda->>Logger: logger.debug("Second debug log") Logger-->>Logger: Buffer second debug log - Lambda->>Logger: logger.flush_buffer() Logger->>CloudWatch: Flush buffered debug logs to CloudWatch - Lambda->>Client: Return execution result - ``` +Flushing buffer manually +
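As a concrete counterpart to the diagram above, a handler following this flow might look like the sketch below (parameter names as of this patch; the buffer size is illustrative):

```python
from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

# minimum_log_level defaults to DEBUG, so only DEBUG records are buffered
logger = Logger(level="DEBUG", logger_buffer=LoggerBufferConfig(max_size=20480))


def lambda_handler(event: dict, context):
    logger.debug("first debug log")   # buffered
    logger.info("info log")           # emitted immediately, as in the diagram
    logger.debug("second debug log")  # buffered
    logger.flush_buffer()             # both buffered DEBUG records are emitted here
    return "ok"
```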
##### Flushing on error +
```mermaid sequenceDiagram participant Client participant Lambda participant Logger participant CloudWatch - Client->>Lambda: Invoke Lambda Lambda->>Logger: Initialize with DEBUG level buffering Logger-->>Lambda: Logger buffer ready - Lambda->>Logger: logger.debug("First log") Logger-->>Logger: Buffer first debug log - Lambda->>Logger: logger.debug("Second log") Logger-->>Logger: Buffer second debug log - Lambda->>Logger: logger.debug("Third log") Logger-->>Logger: Buffer third debug log - Lambda->>Lambda: Exception occurs Lambda->>Logger: logger.error("Error details") Logger->>CloudWatch: Send buffered debug logs Logger->>CloudWatch: Send error log - Lambda->>Client: Raise exception ``` +Flushing buffer when an error happens +
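In code, the same flow relies on `flush_on_error` keeping its default of `True` (a sketch; the failing division stands in for any error path):

```python
from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

# flush_on_error defaults to True, so an error-level log drains the buffer first
logger = Logger(level="DEBUG", logger_buffer=LoggerBufferConfig(max_size=20480))


def lambda_handler(event: dict, context):
    logger.debug("first log")   # buffered
    logger.debug("second log")  # buffered
    try:
        return 1 / 0
    except ZeroDivisionError:
        # emits the buffered DEBUG records, then the error record itself
        logger.error("Error details")
        raise
```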
##### Flushing when uncaught exception happens +
```mermaid sequenceDiagram participant Client participant Lambda participant Logger participant CloudWatch - Client->>Lambda: Invoke Lambda Lambda->>Logger: Using decorator @logger.inject_lambda_context(flush_buffer_on_uncaught_error=True) Logger-->>Lambda: Logger context injected - Lambda->>Logger: logger.debug("First log") Logger-->>Logger: Buffer first debug log - Lambda->>Logger: logger.debug("Second log") Logger-->>Logger: Buffer second debug log - Lambda->>Lambda: Uncaught Exception Lambda->>CloudWatch: Automatically send buffered debug logs - Lambda->>Client: Raise uncaught exception - ``` +Flushing buffer when an uncaught exception happens +
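The inverse case is worth keeping in mind: without `flush_buffer_on_uncaught_error=True`, the decorator clears the buffer when the handler exits, so a crash silently discards the buffered records. A sketch of that pitfall, matching the behavior pinned by the functional tests in this series:

```python
from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

logger = Logger(level="INFO", logger_buffer=LoggerBufferConfig(max_size=20480))


@logger.inject_lambda_context  # note: no flush_buffer_on_uncaught_error=True here
def lambda_handler(event: dict, context):
    logger.debug("diagnostic detail")  # buffered
    raise RuntimeError("boom")  # buffer is cleared on exit; the DEBUG record is never emitted
```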
#### Common buffering questions diff --git a/tests/unit/logger/required_dependencies/test_logger_buffer_config.py b/tests/unit/logger/required_dependencies/test_logger_buffer_config.py index 09881363495..9567a8ef338 100644 --- a/tests/unit/logger/required_dependencies/test_logger_buffer_config.py +++ b/tests/unit/logger/required_dependencies/test_logger_buffer_config.py @@ -11,7 +11,6 @@ def test_default_configuration(): assert config_buffer.max_size == 20480 assert config_buffer.minimum_log_level == "DEBUG" assert config_buffer.flush_on_error is True - assert config_buffer.compress is False def test_custom_configuration(): @@ -20,14 +19,12 @@ def test_custom_configuration(): max_size=51200, minimum_log_level="WARNING", flush_on_error=False, - compress=True, ) # THEN configuration is set with provided values assert config_buffer.max_size == 51200 assert config_buffer.minimum_log_level == "WARNING" assert config_buffer.flush_on_error is False - assert config_buffer.compress is True def test_invalid_max_size_negative(): @@ -79,13 +76,3 @@ def test_invalid_flush_on_error(): with pytest.raises(ValueError, match="flush_on_error must be a boolean"): # THEN a ValueError is raised LoggerBufferConfig(flush_on_error=invalid_flush_on_error) - - -def test_invalid_compress(): - # GIVEN an invalid compress type - invalid_compress = "False" - - # WHEN creating a LoggerBufferConfig / THEN - with pytest.raises(ValueError, match="compress must be a boolean"): - # THEN a ValueError is raised - LoggerBufferConfig(compress=invalid_compress) From a5a12a5c2b5fefb172b50fd5f83112760a00af0a Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 6 Mar 2025 09:27:20 +0000 Subject: [PATCH 34/40] Adding documentation --- docs/core/logger.md | 25 +++++++++++++++++++ ...g_with_buffering_logs_creating_instance.py | 5 ++++ ...ng_with_buffering_logs_reusing_function.py | 6 +++++ ...ing_with_buffering_logs_reusing_handler.py | 12 +++++++++ 4 files changed, 48 insertions(+) create mode 100644 examples/logger/src/working_with_buffering_logs_creating_instance.py create mode 100644 examples/logger/src/working_with_buffering_logs_reusing_function.py create mode 100644 examples/logger/src/working_with_buffering_logs_reusing_handler.py diff --git a/docs/core/logger.md b/docs/core/logger.md index f3b723522f7..764bb618618 100644 --- a/docs/core/logger.md +++ b/docs/core/logger.md @@ -558,6 +558,31 @@ Use the `@logger.inject_lambda_context` decorator to automatically flush buffere --8<-- "examples/logger/src/working_with_buffering_logs_uncaught_exception.py" ``` +#### Reutilizing same buffer instance + +Reuse the same logger instance across multiple files within a service. Doing this you can centralize logger instance creation and prevent buffer configuration drift. + +!!! note "Buffer Inheritance" + Loggers created with the same `service_name` automatically inherit the buffer configuration from the first initialized logger with a buffer configuration. 
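A sketch of what that inheritance means in practice, mirroring the propagation tests added later in this series (the service name is illustrative); the tabs that follow show the documented multi-file layout:

```python
from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

# The first instance created for a service defines the buffer configuration...
primary = Logger(service="payment", logger_buffer=LoggerBufferConfig(max_size=20480))

# ...and a later instance with the same service name inherits it,
# so records logged through either instance land in the shared buffer.
secondary = Logger(service="payment")

secondary.debug("buffered via the second instance")
primary.flush_buffer()  # also emits the record logged through `secondary`
```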
+ +=== "working_with_buffering_logs_creating_instance.py" + + ```python hl_lines="5 8" + --8<-- "examples/logger/src/working_with_buffering_logs_creating_instance.py" + ``` + +=== "working_with_buffering_logs_reusing_handler.py" + + ```python hl_lines="5 8" + --8<-- "examples/logger/src/working_with_buffering_logs_reusing_handler.py" + ``` + +=== "working_with_buffering_logs_reusing_function.py" + + ```python hl_lines="5 8" + --8<-- "examples/logger/src/working_with_buffering_logs_reusing_function.py" + ``` + #### Buffering workflows ##### Flushing manually diff --git a/examples/logger/src/working_with_buffering_logs_creating_instance.py b/examples/logger/src/working_with_buffering_logs_creating_instance.py new file mode 100644 index 00000000000..99036be8e8d --- /dev/null +++ b/examples/logger/src/working_with_buffering_logs_creating_instance.py @@ -0,0 +1,5 @@ +from aws_lambda_powertools import Logger +from aws_lambda_powertools.logging.buffer import LoggerBufferConfig + +logger_buffer_config = LoggerBufferConfig(max_size=20480, minimum_log_level="WARNING") +logger = Logger(level="INFO", logger_buffer=logger_buffer_config) diff --git a/examples/logger/src/working_with_buffering_logs_reusing_function.py b/examples/logger/src/working_with_buffering_logs_reusing_function.py new file mode 100644 index 00000000000..3de22289bbe --- /dev/null +++ b/examples/logger/src/working_with_buffering_logs_reusing_function.py @@ -0,0 +1,6 @@ +from working_with_buffering_logs_creating_instance import logger # reusing same instance + + +def my_function(): + logger.debug("This will be buffered") + # do stuff diff --git a/examples/logger/src/working_with_buffering_logs_reusing_handler.py b/examples/logger/src/working_with_buffering_logs_reusing_handler.py new file mode 100644 index 00000000000..96f28c47916 --- /dev/null +++ b/examples/logger/src/working_with_buffering_logs_reusing_handler.py @@ -0,0 +1,12 @@ +from working_with_buffering_logs_creating_instance import logger # reusing same instance +from working_with_buffering_logs_reusing_function import my_function + +from aws_lambda_powertools.utilities.typing import LambdaContext + + +def lambda_handler(event: dict, context: LambdaContext): + logger.debug("a debug log") # this is buffered + + my_function() + + logger.flush_buffer() From 46fc1df362d3ae77453cbac1e21778c4cc8f6c78 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 6 Mar 2025 16:52:47 +0000 Subject: [PATCH 35/40] Addressing Andrea's feedback --- docs/core/logger.md | 26 ++++++++++--------- ...ng_with_buffering_logs_different_levels.py | 2 +- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/docs/core/logger.md b/docs/core/logger.md index 764bb618618..64f80f97353 100644 --- a/docs/core/logger.md +++ b/docs/core/logger.md @@ -526,7 +526,7 @@ Log buffering enables you to buffer logs for a specific request or invocation. E --8<-- "examples/logger/src/getting_started_with_buffering_logs.py" ``` -#### Customizing Buffer configuration +#### Configuring the buffer When configuring log buffering, you have options to fine-tune how logs are captured, stored, and emitted. 
You can configure the following parameters in the `LoggerBufferConfig` constructor: @@ -548,9 +548,9 @@ When configuring log buffering, you have options to fine-tune how logs are captu --8<-- "examples/logger/src/working_with_buffering_logs_disable_on_error.py" ``` -#### Flushing on uncaught exceptions +#### Flushing on exceptions -Use the `@logger.inject_lambda_context` decorator to automatically flush buffered logs when an uncaught exception occurs in your Lambda function. The `flush_buffer_on_uncaught_error` parameter captures and flush all buffered logs records before the Lambda execution terminates. +Use the `@logger.inject_lambda_context` decorator to automatically flush buffered logs when an exception is raised in your Lambda function. The `flush_buffer_on_uncaught_error` parameter captures and flush all buffered logs records before the Lambda execution terminates. === "working_with_buffering_logs_uncaught_exception.py" @@ -604,13 +604,13 @@ sequenceDiagram Lambda->>Logger: logger.debug("Second debug log") Logger-->>Logger: Buffer second debug log Lambda->>Logger: logger.flush_buffer() - Logger->>CloudWatch: Flush buffered debug logs to CloudWatch + Logger->>CloudWatch: Emit buffered logs to stdout Lambda->>Client: Return execution result ``` Flushing buffer manually -##### Flushing on error +##### Flushing when logging an error
```mermaid @@ -630,14 +630,16 @@ sequenceDiagram Logger-->>Logger: Buffer third debug log Lambda->>Lambda: Exception occurs Lambda->>Logger: logger.error("Error details") - Logger->>CloudWatch: Send buffered debug logs - Logger->>CloudWatch: Send error log + Logger->>CloudWatch: Emit buffered debug logs + Logger->>CloudWatch: Emit error log Lambda->>Client: Raise exception ``` Flushing buffer when an error happens
-##### Flushing when uncaught exception happens +##### Flushing on exception + +This works only when decorating your Lambda handler with the decorator `@logger.inject_lambda_context(flush_buffer_on_uncaught_error=True)`
```mermaid @@ -647,14 +649,14 @@ sequenceDiagram participant Logger participant CloudWatch Client->>Lambda: Invoke Lambda - Lambda->>Logger: Using decorator @logger.inject_lambda_context(flush_buffer_on_uncaught_error=True) + Lambda->>Logger: Using decorator Logger-->>Lambda: Logger context injected Lambda->>Logger: logger.debug("First log") Logger-->>Logger: Buffer first debug log Lambda->>Logger: logger.debug("Second log") Logger-->>Logger: Buffer second debug log Lambda->>Lambda: Uncaught Exception - Lambda->>CloudWatch: Automatically send buffered debug logs + Lambda->>CloudWatch: Automatically emit buffered debug logs Lambda->>Client: Raise uncaught exception ``` Flushing buffer when an uncaught exception happens @@ -666,13 +668,13 @@ When using log buffering to control log emissions in your AWS Lambda functions, 1. **How can I prevent log buffering from consuming excessive memory?** Set a `max_size` in `LoggerBufferConfig` to limit the buffer's memory footprint. -2. **What happens if the log buffer reaches its maximum size?** The oldest logs are discarded when the buffer is full, making room for new logs. You need to set an appropriate `max_size` configuration. +2. **What happens if the log buffer reaches its maximum size?** The oldest logs are discarded when the buffer is full, making room for new logs. You need to set an appropriate `max_size` configuration. When this happens, we emit a warning when flushing the buffer to let you know this happened. 3. **Can I customize when logs are flushed?** Yes, use the `flush_on_error=True` in `LoggerBufferConfig` or use `flush_buffer_on_uncaught_error` in `@logger.inject_lambda_context` decorator. 4. **What timestamp is used when I flush the logs?** The timestamp preserves the original time when the log record was created. If you create a log record at 11:00:10 and flush it at 11:00:25, the log line will retain its original timestamp of 11:00:10. -5. **What happens if I try to add a log line that is bigger than max buffer size?** It will not buffer, but log as a normal log. +5. **What happens if I try to add a log line that is bigger than max buffer size?** It will not buffer, but log as a normal log and emit a warning. 6. **What happens if Lambda times out without flushing the buffer?** Buffer will be lost and no buffered logs will be flushed. 
diff --git a/examples/logger/src/working_with_buffering_logs_different_levels.py b/examples/logger/src/working_with_buffering_logs_different_levels.py index 82b61cf370e..a6d6c65f093 100644 --- a/examples/logger/src/working_with_buffering_logs_different_levels.py +++ b/examples/logger/src/working_with_buffering_logs_different_levels.py @@ -2,7 +2,7 @@ from aws_lambda_powertools.logging.buffer import LoggerBufferConfig from aws_lambda_powertools.utilities.typing import LambdaContext -logger_buffer_config = LoggerBufferConfig(max_size=20480, minimum_log_level="WARNING") +logger_buffer_config = LoggerBufferConfig(minimum_log_level="WARNING") logger = Logger(level="INFO", logger_buffer=logger_buffer_config) From b1e2f992581048a32acff7f3ad8f87df8974b985 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 6 Mar 2025 17:02:56 +0000 Subject: [PATCH 36/40] Addressing Andrea's feedback --- docs/core/logger.md | 14 ++++++++------ ...working_with_buffering_logs_different_levels.py | 2 +- ...ng_with_buffering_logs_when_raise_exception.py} | 0 3 files changed, 9 insertions(+), 7 deletions(-) rename examples/logger/src/{working_with_buffering_logs_uncaught_exception.py => working_with_buffering_logs_when_raise_exception.py} (100%) diff --git a/docs/core/logger.md b/docs/core/logger.md index 64f80f97353..220dd3d343b 100644 --- a/docs/core/logger.md +++ b/docs/core/logger.md @@ -522,7 +522,7 @@ Log buffering enables you to buffer logs for a specific request or invocation. E === "getting_started_with_buffering_logs.py" - ```python hl_lines="5 8" + ```python hl_lines="5 6 15" --8<-- "examples/logger/src/getting_started_with_buffering_logs.py" ``` @@ -538,13 +538,15 @@ When configuring log buffering, you have options to fine-tune how logs are captu === "working_with_buffering_logs_different_levels.py" - ```python hl_lines="5 8" + ```python hl_lines="5 6 10-12" --8<-- "examples/logger/src/working_with_buffering_logs_different_levels.py" ``` + 1. Setting `minimum_log_level="WARNING"` configures log buffering for `WARNING` and lower severity levels (`INFO`, `DEBUG`). + === "working_with_buffering_logs_disable_on_error.py" - ```python hl_lines="5 8" + ```python hl_lines="5 6 14 21 24" --8<-- "examples/logger/src/working_with_buffering_logs_disable_on_error.py" ``` @@ -552,10 +554,10 @@ When configuring log buffering, you have options to fine-tune how logs are captu Use the `@logger.inject_lambda_context` decorator to automatically flush buffered logs when an exception is raised in your Lambda function. The `flush_buffer_on_uncaught_error` parameter captures and flush all buffered logs records before the Lambda execution terminates. 
-=== "working_with_buffering_logs_uncaught_exception.py" +=== "working_with_buffering_logs_when_raise_exception.py" - ```python hl_lines="5 8" - --8<-- "examples/logger/src/working_with_buffering_logs_uncaught_exception.py" + ```python hl_lines="5 6 13 19" + --8<-- "examples/logger/src/working_with_buffering_logs_when_raise_exception.py" ``` #### Reutilizing same buffer instance diff --git a/examples/logger/src/working_with_buffering_logs_different_levels.py b/examples/logger/src/working_with_buffering_logs_different_levels.py index a6d6c65f093..829b5e8db5f 100644 --- a/examples/logger/src/working_with_buffering_logs_different_levels.py +++ b/examples/logger/src/working_with_buffering_logs_different_levels.py @@ -2,7 +2,7 @@ from aws_lambda_powertools.logging.buffer import LoggerBufferConfig from aws_lambda_powertools.utilities.typing import LambdaContext -logger_buffer_config = LoggerBufferConfig(minimum_log_level="WARNING") +logger_buffer_config = LoggerBufferConfig(minimum_log_level="WARNING") # (1)! logger = Logger(level="INFO", logger_buffer=logger_buffer_config) diff --git a/examples/logger/src/working_with_buffering_logs_uncaught_exception.py b/examples/logger/src/working_with_buffering_logs_when_raise_exception.py similarity index 100% rename from examples/logger/src/working_with_buffering_logs_uncaught_exception.py rename to examples/logger/src/working_with_buffering_logs_when_raise_exception.py From f5618ea762c830c36e20e193cf44ba6088d2a842 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 6 Mar 2025 17:57:33 +0000 Subject: [PATCH 37/40] Addressing Andrea's feedback --- docs/core/logger.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/core/logger.md b/docs/core/logger.md index 220dd3d343b..343d87b9544 100644 --- a/docs/core/logger.md +++ b/docs/core/logger.md @@ -560,28 +560,28 @@ Use the `@logger.inject_lambda_context` decorator to automatically flush buffere --8<-- "examples/logger/src/working_with_buffering_logs_when_raise_exception.py" ``` -#### Reutilizing same buffer instance +#### Reutilizing same logger instance -Reuse the same logger instance across multiple files within a service. Doing this you can centralize logger instance creation and prevent buffer configuration drift. +If you are using log buffering, we recommend sharing the same log instance across your code/modules, so that the same buffer is also shared. Doing this you can centralize logger instance creation and prevent buffer configuration drift. !!! note "Buffer Inheritance" Loggers created with the same `service_name` automatically inherit the buffer configuration from the first initialized logger with a buffer configuration. 
=== "working_with_buffering_logs_creating_instance.py" - ```python hl_lines="5 8" + ```python hl_lines="2 5" --8<-- "examples/logger/src/working_with_buffering_logs_creating_instance.py" ``` === "working_with_buffering_logs_reusing_handler.py" - ```python hl_lines="5 8" + ```python hl_lines="1 8 12" --8<-- "examples/logger/src/working_with_buffering_logs_reusing_handler.py" ``` === "working_with_buffering_logs_reusing_function.py" - ```python hl_lines="5 8" + ```python hl_lines="1" --8<-- "examples/logger/src/working_with_buffering_logs_reusing_function.py" ``` From 1da9bf29d9a5abaff09fcb3ff0f9636eca6a1865 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 6 Mar 2025 21:29:03 +0000 Subject: [PATCH 38/40] Refactoring parameters name + child logger + tests --- .../logging/buffer/config.py | 48 ++++--- aws_lambda_powertools/logging/logger.py | 23 ++-- docs/core/logger.md | 17 ++- .../getting_started_with_buffering_logs.py | 4 +- ...g_with_buffering_logs_creating_instance.py | 4 +- ...ng_with_buffering_logs_different_levels.py | 4 +- ...ng_with_buffering_logs_disable_on_error.py | 4 +- ...ith_buffering_logs_when_raise_exception.py | 4 +- .../logger/handlers/buffer_logs_with_flush.py | 4 +- .../handlers/buffer_logs_without_flush.py | 4 +- .../test_powertools_logger_buffer.py | 129 +++++++++++++----- .../test_logger_buffer_config.py | 30 ++-- 12 files changed, 170 insertions(+), 105 deletions(-) diff --git a/aws_lambda_powertools/logging/buffer/config.py b/aws_lambda_powertools/logging/buffer/config.py index 7879f1bc478..cd8a7935fdf 100644 --- a/aws_lambda_powertools/logging/buffer/config.py +++ b/aws_lambda_powertools/logging/buffer/config.py @@ -14,35 +14,33 @@ class LoggerBufferConfig: def __init__( self, - max_size: int = 20480, - minimum_log_level: LOG_LEVEL_BUFFER_VALUES = "DEBUG", - flush_on_error: bool = True, + max_bytes: int = 20480, + buffer_at_verbosity: LOG_LEVEL_BUFFER_VALUES = "DEBUG", + flush_on_error_log: bool = True, ): """ Initialize logger buffer configuration. Parameters ---------- - max_size : int, optional + max_bytes : int, optional Maximum size of the buffer in bytes - minimum_log_level : str, optional + buffer_at_verbosity : str, optional Minimum log level to buffer - flush_on_error : bool, optional + flush_on_error_log : bool, optional Whether to flush the buffer when an error occurs - compress : bool, optional - Whether to compress buffered logs """ - self._validate_inputs(max_size, minimum_log_level, flush_on_error) + self._validate_inputs(max_bytes, buffer_at_verbosity, flush_on_error_log) - self._max_size = max_size - self._minimum_log_level = minimum_log_level.upper() - self._flush_on_error = flush_on_error + self._max_bytes = max_bytes + self._buffer_at_verbosity = buffer_at_verbosity.upper() + self._flush_on_error_log = flush_on_error_log def _validate_inputs( self, - max_size: int, - minimum_log_level: str, - flush_on_error: bool, + max_bytes: int, + buffer_at_verbosity: str, + flush_on_error_log: bool, ) -> None: """ Validate configuration inputs. 
@@ -51,30 +49,30 @@ def _validate_inputs( ---------- Same as __init__ method parameters """ - if not isinstance(max_size, int) or max_size <= 0: + if not isinstance(max_bytes, int) or max_bytes <= 0: raise ValueError("Max size must be a positive integer") - if not isinstance(minimum_log_level, str): + if not isinstance(buffer_at_verbosity, str): raise ValueError("Log level must be a string") # Validate log level - if minimum_log_level.upper() not in self.VALID_LOG_LEVELS: + if buffer_at_verbosity.upper() not in self.VALID_LOG_LEVELS: raise ValueError(f"Invalid log level. Must be one of {self.VALID_LOG_LEVELS}") - if not isinstance(flush_on_error, bool): + if not isinstance(flush_on_error_log, bool): raise ValueError("flush_on_error must be a boolean") @property - def max_size(self) -> int: + def max_bytes(self) -> int: """Maximum buffer size in bytes.""" - return self._max_size + return self._max_bytes @property - def minimum_log_level(self) -> str: + def buffer_at_verbosity(self) -> str: """Minimum log level to buffer.""" - return self._minimum_log_level + return self._buffer_at_verbosity @property - def flush_on_error(self) -> bool: + def flush_on_error_log(self) -> bool: """Flag to flush buffer on error.""" - return self._flush_on_error + return self._flush_on_error_log diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 57b51b4f4a2..c57cc600f61 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -227,7 +227,7 @@ def __init__( utc: bool = False, use_rfc3339: bool = False, serialize_stacktrace: bool = True, - logger_buffer: LoggerBufferConfig | None = None, + buffer_config: LoggerBufferConfig | None = None, **kwargs, ) -> None: @@ -269,9 +269,9 @@ def __init__( "serialize_stacktrace": serialize_stacktrace, } - self._buffer_config = logger_buffer + self._buffer_config = buffer_config if self._buffer_config: - self._buffer_cache = LoggerBufferCache(max_size_bytes=self._buffer_config.max_size) + self._buffer_cache = LoggerBufferCache(max_size_bytes=self._buffer_config.max_bytes) self._init_logger( formatter_options=formatter_options, @@ -337,6 +337,11 @@ def _init_logger( is_logger_preconfigured = getattr(self._logger, LOGGER_ATTRIBUTE_PRECONFIGURED, False) if self.child: self.setLevel(log_level) + if getattr(self._logger.parent, "powertools_buffer_config", None): + # Initializes a new, empty LoggerBufferCache for child logger + # Preserves parent's buffer configuration while resetting cache contents + self._buffer_config = self._logger.parent.powertools_buffer_config # type: ignore[union-attr] + self._buffer_cache = LoggerBufferCache(self._logger.parent.powertools_buffer_config.max_bytes) # type: ignore[union-attr] return if is_logger_preconfigured: @@ -619,7 +624,7 @@ def info( ) # Bypass buffer when log severity meets or exceeds configured minimum - if _check_minimum_buffer_log_level(self._buffer_config.minimum_log_level, "INFO"): + if _check_minimum_buffer_log_level(self._buffer_config.buffer_at_verbosity, "INFO"): return self._logger.info( msg, *args, @@ -673,7 +678,7 @@ def warning( ) # Bypass buffer when log severity meets or exceeds configured minimum - if _check_minimum_buffer_log_level(self._buffer_config.minimum_log_level, "WARNING"): + if _check_minimum_buffer_log_level(self._buffer_config.buffer_at_verbosity, "WARNING"): return self._logger.warning( msg, *args, @@ -711,7 +716,7 @@ def error( # 2. If auto-flush enabled, trigger complete buffer processing # 3. 
Error log is not "bufferable", so ensure error log is immediately available - if self._buffer_config and self._buffer_config.flush_on_error: + if self._buffer_config and self._buffer_config.flush_on_error_log: self.flush_buffer() return self._logger.error( @@ -741,7 +746,7 @@ def critical( # 2. If auto-flush enabled, trigger complete buffer processing # 3. Critical log is not "bufferable", so ensure error log is immediately available - if self._buffer_config and self._buffer_config.flush_on_error: + if self._buffer_config and self._buffer_config.flush_on_error_log: self.flush_buffer() return self._logger.critical( @@ -770,7 +775,7 @@ def exception( # 1. Buffer configuration checked for immediate flush # 2. If auto-flush enabled, trigger complete buffer processing # 3. Exception log is not "bufferable", so ensure error log is immediately available - if self._buffer_config and self._buffer_config.flush_on_error: + if self._buffer_config and self._buffer_config.flush_on_error_log: self.flush_buffer() return self._logger.exception( @@ -1133,7 +1138,7 @@ def _add_log_record_to_buffer( except BufferError: warnings.warn( message="Cannot add item to the buffer. " - f"Item size exceeds total cache size {self._buffer_config.max_size} bytes", + f"Item size exceeds total cache size {self._buffer_config.max_bytes} bytes", category=PowertoolsUserWarning, stacklevel=2, ) diff --git a/docs/core/logger.md b/docs/core/logger.md index 343d87b9544..06b0fa50e9c 100644 --- a/docs/core/logger.md +++ b/docs/core/logger.md @@ -11,6 +11,7 @@ Logger provides an opinionated logger with output structured as JSON. * Log Lambda event when instructed (disabled by default) * Log sampling enables DEBUG log level for a percentage of requests (disabled by default) * Append additional keys to structured log at any point in time +* Buffering logs for a specific request or invocation, and flushing them automatically on error or manually as needed. ## Getting started @@ -530,11 +531,13 @@ Log buffering enables you to buffer logs for a specific request or invocation. E When configuring log buffering, you have options to fine-tune how logs are captured, stored, and emitted. You can configure the following parameters in the `LoggerBufferConfig` constructor: -| Parameter | Description | Configuration | -|-------------------- |------------------------------------------------ |----------------------------- | -| `max_size` | Maximum size of the log buffer in bytes | `int` (default: 20480 bytes) | -| `minimum_log_level` | Minimum log level to buffer | `DEBUG`, `INFO`, `WARNING` | -| `flush_on_error` | Automatically flush buffer when an error occurs | `True` (default), `False` | +| Parameter | Description | Configuration | +|---------------------- |------------------------------------------------ |----------------------------- | +| `max_bytes` | Maximum size of the log buffer in bytes | `int` (default: 20480 bytes) | +| `buffer_at_verbosity` | Minimum log level to buffer | `DEBUG`, `INFO`, `WARNING` | +| `flush_on_error_log` | Automatically flush buffer when an error occurs | `True` (default), `False` | + +!!! note "When `flush_on_error_log` is enabled, it automatically flushes for `logger.exception()`, `logger.error()`, and `logger.critical()` statements." === "working_with_buffering_logs_different_levels.py" @@ -550,6 +553,8 @@ When configuring log buffering, you have options to fine-tune how logs are captu --8<-- "examples/logger/src/working_with_buffering_logs_disable_on_error.py" ``` + 1. 
Disabling `flush_on_error_log` will not flush the buffer when logging an error. This is useful when you want to control when the buffer is flushed by calling the `logger.flush_buffer()` method. + #### Flushing on exceptions Use the `@logger.inject_lambda_context` decorator to automatically flush buffered logs when an exception is raised in your Lambda function. The `flush_buffer_on_uncaught_error` parameter captures and flush all buffered logs records before the Lambda execution terminates. @@ -567,6 +572,8 @@ If you are using log buffering, we recommend sharing the same log instance acros !!! note "Buffer Inheritance" Loggers created with the same `service_name` automatically inherit the buffer configuration from the first initialized logger with a buffer configuration. + Child loggers instances inherit their parent's buffer configuration but maintain a separate buffer. + === "working_with_buffering_logs_creating_instance.py" ```python hl_lines="2 5" diff --git a/examples/logger/src/getting_started_with_buffering_logs.py b/examples/logger/src/getting_started_with_buffering_logs.py index 60ff421edf1..8e210662aa0 100644 --- a/examples/logger/src/getting_started_with_buffering_logs.py +++ b/examples/logger/src/getting_started_with_buffering_logs.py @@ -2,8 +2,8 @@ from aws_lambda_powertools.logging.buffer import LoggerBufferConfig from aws_lambda_powertools.utilities.typing import LambdaContext -logger_buffer_config = LoggerBufferConfig(max_size=20480, flush_on_error=True) -logger = Logger(level="INFO", logger_buffer=logger_buffer_config) +logger_buffer_config = LoggerBufferConfig(max_bytes=20480, flush_on_error_log=True) +logger = Logger(level="INFO", buffer_config=logger_buffer_config) def lambda_handler(event: dict, context: LambdaContext): diff --git a/examples/logger/src/working_with_buffering_logs_creating_instance.py b/examples/logger/src/working_with_buffering_logs_creating_instance.py index 99036be8e8d..32acc20b5ce 100644 --- a/examples/logger/src/working_with_buffering_logs_creating_instance.py +++ b/examples/logger/src/working_with_buffering_logs_creating_instance.py @@ -1,5 +1,5 @@ from aws_lambda_powertools import Logger from aws_lambda_powertools.logging.buffer import LoggerBufferConfig -logger_buffer_config = LoggerBufferConfig(max_size=20480, minimum_log_level="WARNING") -logger = Logger(level="INFO", logger_buffer=logger_buffer_config) +logger_buffer_config = LoggerBufferConfig(max_bytes=20480, buffer_at_verbosity="WARNING") +logger = Logger(level="INFO", buffer_config=logger_buffer_config) diff --git a/examples/logger/src/working_with_buffering_logs_different_levels.py b/examples/logger/src/working_with_buffering_logs_different_levels.py index 829b5e8db5f..20a735c7501 100644 --- a/examples/logger/src/working_with_buffering_logs_different_levels.py +++ b/examples/logger/src/working_with_buffering_logs_different_levels.py @@ -2,8 +2,8 @@ from aws_lambda_powertools.logging.buffer import LoggerBufferConfig from aws_lambda_powertools.utilities.typing import LambdaContext -logger_buffer_config = LoggerBufferConfig(minimum_log_level="WARNING") # (1)! -logger = Logger(level="INFO", logger_buffer=logger_buffer_config) +logger_buffer_config = LoggerBufferConfig(buffer_at_verbosity="WARNING") # (1)! 
+logger = Logger(level="INFO", buffer_config=logger_buffer_config) def lambda_handler(event: dict, context: LambdaContext): diff --git a/examples/logger/src/working_with_buffering_logs_disable_on_error.py b/examples/logger/src/working_with_buffering_logs_disable_on_error.py index 8dca2dc25eb..5e5f7555e7d 100644 --- a/examples/logger/src/working_with_buffering_logs_disable_on_error.py +++ b/examples/logger/src/working_with_buffering_logs_disable_on_error.py @@ -2,8 +2,8 @@ from aws_lambda_powertools.logging.buffer import LoggerBufferConfig from aws_lambda_powertools.utilities.typing import LambdaContext -logger_buffer_config = LoggerBufferConfig(max_size=20480, flush_on_error=False) -logger = Logger(level="INFO", logger_buffer=logger_buffer_config) +logger_buffer_config = LoggerBufferConfig(flush_on_error_log=False) # (1)! +logger = Logger(level="INFO", buffer_config=logger_buffer_config) class MyException(Exception): diff --git a/examples/logger/src/working_with_buffering_logs_when_raise_exception.py b/examples/logger/src/working_with_buffering_logs_when_raise_exception.py index 0fdc5b81600..20f39efcdb1 100644 --- a/examples/logger/src/working_with_buffering_logs_when_raise_exception.py +++ b/examples/logger/src/working_with_buffering_logs_when_raise_exception.py @@ -2,8 +2,8 @@ from aws_lambda_powertools.logging.buffer import LoggerBufferConfig from aws_lambda_powertools.utilities.typing import LambdaContext -logger_buffer_config = LoggerBufferConfig(max_size=20480, flush_on_error=False) -logger = Logger(level="INFO", logger_buffer=logger_buffer_config) +logger_buffer_config = LoggerBufferConfig(max_bytes=20480, flush_on_error_log=False) +logger = Logger(level="INFO", buffer_config=logger_buffer_config) class MyException(Exception): diff --git a/tests/e2e/logger/handlers/buffer_logs_with_flush.py b/tests/e2e/logger/handlers/buffer_logs_with_flush.py index 8845b14fc45..bcf70db3291 100644 --- a/tests/e2e/logger/handlers/buffer_logs_with_flush.py +++ b/tests/e2e/logger/handlers/buffer_logs_with_flush.py @@ -1,9 +1,9 @@ from aws_lambda_powertools import Logger from aws_lambda_powertools.logging.buffer import LoggerBufferConfig -logger_buffer_config = LoggerBufferConfig(max_size=10240) +logger_buffer_config = LoggerBufferConfig(max_bytes=10240) -logger = Logger(level="INFO", logger_buffer=logger_buffer_config) +logger = Logger(level="INFO", buffer_config=logger_buffer_config) def lambda_handler(event, context): diff --git a/tests/e2e/logger/handlers/buffer_logs_without_flush.py b/tests/e2e/logger/handlers/buffer_logs_without_flush.py index 9565d0234f3..ef606c0c474 100644 --- a/tests/e2e/logger/handlers/buffer_logs_without_flush.py +++ b/tests/e2e/logger/handlers/buffer_logs_without_flush.py @@ -1,9 +1,9 @@ from aws_lambda_powertools import Logger from aws_lambda_powertools.logging.buffer import LoggerBufferConfig -logger_buffer_config = LoggerBufferConfig(max_size=10240) +logger_buffer_config = LoggerBufferConfig(max_bytes=10240) -logger = Logger(level="INFO", logger_buffer=logger_buffer_config) +logger = Logger(level="INFO", buffer_config=logger_buffer_config) def lambda_handler(event, context): diff --git a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py index d94407b1121..1e5a104be83 100644 --- a/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py +++ b/tests/functional/logger/required_dependencies/test_powertools_logger_buffer.py @@ -52,8 +52,8 @@ def 
test_logger_buffer_with_minimum_level_warning(log_level, stdout, service_nam monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a buffer and minimum log level set to WARNING - logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="WARNING") - logger = Logger(level=log_level, service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=10240, buffer_at_verbosity="WARNING") + logger = Logger(level=log_level, service=service_name, stream=stdout, buffer_config=logger_buffer_config) msg = "This is a test" log_command = { @@ -73,8 +73,8 @@ def test_logger_buffer_with_minimum_level_warning(log_level, stdout, service_nam def test_logger_buffer_is_never_buffered_with_exception(stdout, service_name): # GIVEN A logger configured with a buffer and default logging behavior - logger_buffer_config = LoggerBufferConfig(max_size=10240) - logger = Logger(service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=10240) + logger = Logger(service=service_name, stream=stdout, buffer_config=logger_buffer_config) # WHEN An exception is raised and logged try: @@ -89,8 +89,8 @@ def test_logger_buffer_is_never_buffered_with_exception(stdout, service_name): def test_logger_buffer_is_never_buffered_with_error(stdout, service_name): # GIVEN A logger configured with a buffer and default logging behavior - logger_buffer_config = LoggerBufferConfig(max_size=10240) - logger = Logger(service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=10240) + logger = Logger(service=service_name, stream=stdout, buffer_config=logger_buffer_config) # WHEN Logging an error message logger.error("Received an exception") @@ -105,8 +105,8 @@ def test_logger_buffer_is_flushed_when_an_error_happens(stdout, service_name, lo monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with buffer and automatic error-based flushing - logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=True) - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=10240, buffer_at_verbosity="DEBUG", flush_on_error_log=True) + logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) # WHEN Adding debug log messages before triggering an error logger.debug("this log line will be flushed") @@ -134,8 +134,8 @@ def test_logger_buffer_is_not_flushed_when_an_error_happens(stdout, service_name monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a buffer and error flushing disabled - logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG", flush_on_error=False) - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=10240, buffer_at_verbosity="DEBUG", flush_on_error_log=False) + logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) # WHEN Adding debug log messages before an error logger.debug("this log line will be flushed") @@ -162,8 +162,8 @@ def test_create_and_flush_logs(stdout, 
service_name, monkeypatch): monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a large buffer - logger_buffer_config = LoggerBufferConfig(max_size=10240) - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=10240) + logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) # WHEN Logging a message and then flushing the buffer logger.debug("this log line will be flushed") @@ -178,8 +178,8 @@ def test_ensure_log_location_after_flush_buffer(stdout, service_name, monkeypatc monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a sufficiently large buffer - logger_buffer_config = LoggerBufferConfig(max_size=10240) - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=10240) + logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) # WHEN Logging a debug message and immediately flushing the buffer logger.debug("this log line will be flushed") @@ -194,8 +194,8 @@ def test_clear_buffer_during_execution(stdout, service_name, monkeypatch): monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a sufficiently large buffer - logger_buffer_config = LoggerBufferConfig(max_size=10240) - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=10240) + logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) # WHEN we clear the buffer during the execution logger.debug("this log line will be flushed") @@ -211,8 +211,8 @@ def test_exception_logging_during_buffer_flush(stdout, service_name, monkeypatch monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a sufficiently large buffer - logger_buffer_config = LoggerBufferConfig(max_size=10240) - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=10240) + logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) # Custom exception class class MyError(Exception): @@ -237,8 +237,8 @@ def test_create_buffer_with_items_evicted(stdout, service_name, monkeypatch): monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a 1024-byte buffer - logger_buffer_config = LoggerBufferConfig(max_size=1024, minimum_log_level="DEBUG") - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=1024, buffer_at_verbosity="DEBUG") + logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) # WHEN Adding multiple log entries that exceed buffer size logger.debug("this log line will be flushed") @@ -256,8 +256,8 @@ def test_create_buffer_with_items_evicted_with_next_invocation(stdout, service_n monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a 
1024-byte buffer - logger_buffer_config = LoggerBufferConfig(max_size=1024, minimum_log_level="DEBUG") - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=1024, buffer_at_verbosity="DEBUG") + logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) # WHEN Adding multiple log entries that exceed buffer size message = "this log line will be flushed" @@ -286,9 +286,9 @@ def test_flush_buffer_when_empty(stdout, service_name, monkeypatch): monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN: A logger configured with a 1024-byte buffer - logger_buffer_config = LoggerBufferConfig(max_size=1024, minimum_log_level="DEBUG") + logger_buffer_config = LoggerBufferConfig(max_bytes=1024, buffer_at_verbosity="DEBUG") - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) # WHEN: Flushing the buffer without adding any log entries logger.flush_buffer() @@ -304,9 +304,9 @@ def test_log_record_exceeding_buffer_size(stdout, service_name, monkeypatch): message = "this log is bigger than entire buffer size" # GIVEN A logger configured with a small 10-byte buffer - logger_buffer_config = LoggerBufferConfig(max_size=10, minimum_log_level="DEBUG") + logger_buffer_config = LoggerBufferConfig(max_bytes=10, buffer_at_verbosity="DEBUG") - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) # WHEN Attempting to log a message larger than the entire buffer # THEN A warning should be raised indicating buffer size limitation @@ -323,8 +323,8 @@ def test_logger_buffer_log_output_for_levels_above_minimum(log_level, stdout, se monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with a buffer and minimum log level set to DEBUG - logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG") - logger = Logger(level=log_level, service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=10240, buffer_at_verbosity="DEBUG") + logger = Logger(level=log_level, service=service_name, stream=stdout, buffer_config=logger_buffer_config) msg = f"This is a test with level {log_level}" log_command = { @@ -346,8 +346,8 @@ def test_logger_buffer_flush_on_uncaught_exception(stdout, service_name, monkeyp monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN: A logger configured with a large buffer and error-based flushing - logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG") - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=10240, buffer_at_verbosity="DEBUG") + logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) @logger.inject_lambda_context(flush_buffer_on_uncaught_error=True) def handler(event, context): @@ -371,8 +371,8 @@ def test_logger_buffer_not_flush_on_uncaught_exception(stdout, service_name, mon monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, 
"1-67c39786-5908a82a246fb67f3089263f") # GIVEN: A logger configured with a large buffer and error-based flushing - logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG") - logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + logger_buffer_config = LoggerBufferConfig(max_bytes=10240, buffer_at_verbosity="DEBUG") + logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) @logger.inject_lambda_context(flush_buffer_on_uncaught_error=False) def handler(event, context): @@ -390,14 +390,14 @@ def handler(event, context): assert len(log) == 0 -def test_buffer_configuration_propagation_across_logger_instances(stdout, service_name, monkeypatch): +def test_buffer_configuration_and_buffer_propagation_across_logger_instances(stdout, service_name, monkeypatch): monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") # GIVEN A logger configured with specific buffer settings - logger_buffer_config = LoggerBufferConfig(max_size=10240, minimum_log_level="DEBUG") + logger_buffer_config = LoggerBufferConfig(max_bytes=10240, buffer_at_verbosity="DEBUG") # Create primary logger with explicit buffer configuration - primary_logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config) + primary_logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) # Create secondary logger for the same service (should inherit buffer config) secondary_logger = Logger(level="DEBUG", service=service_name) @@ -415,13 +415,68 @@ def test_buffer_configuration_propagation_across_logger_instances(stdout, servic assert primary_logger._logger.powertools_buffer_config == secondary_logger._logger.powertools_buffer_config +def test_buffer_config_isolation_between_loggers_with_different_services(stdout, service_name, monkeypatch): + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") + + # GIVEN A logger configured with specific buffer settings + logger_buffer_config = LoggerBufferConfig(max_bytes=10240, buffer_at_verbosity="DEBUG") + + # Create primary logger with explicit buffer configuration + buffered_logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config) + + # Configure another logger with a different service name + unbuffered_logger = Logger(level="DEBUG", service="powertoolsxyz") + + # WHEN + # Log messages using both loggers and flush the buffer + buffered_logger.debug("Log message from buffered logger") + unbuffered_logger.debug("Log message from unbuffered logger") + buffered_logger.flush_buffer() + + # THEN The buffered logger's message is present in the output + # THEN The loggers have different buffer configurations + log = capture_multiple_logging_statements_output(stdout) + + assert "Log message from buffered logger" == log[0]["message"] + assert len(log) == 1 + assert buffered_logger._logger.powertools_buffer_config != unbuffered_logger._logger.powertools_buffer_config + + +def test_buffer_configuration_propagation_across_child_logger_instances(stdout, service_name, monkeypatch): + monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f") + + # GIVEN A logger configured with specific buffer settings + logger_buffer_config = LoggerBufferConfig(max_bytes=10240, buffer_at_verbosity="DEBUG") + + # Create primary logger with explicit buffer configuration + primary_logger = 
Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config)
+
+    # Create a child logger
+    secondary_logger = Logger(level="DEBUG", service=service_name, child=True)
+
+    # WHEN Logging messages and flushing the buffer
+    primary_logger.debug("Log message from primary logger")
+    secondary_logger.debug("Log message from secondary logger")
+
+    primary_logger.flush_buffer()
+
+    # THEN Verify log messages are correctly captured and output only for primary logger
+    # 1. Only one log message is output (from parent logger)
+    # 2. Buffer configuration is shared between parent and child
+    # 3. Buffer caches remain separate between instances
+    log = capture_multiple_logging_statements_output(stdout)
+    assert len(log) == 1
+    assert primary_logger._buffer_config == secondary_logger._buffer_config
+    assert primary_logger._buffer_cache != secondary_logger._buffer_cache
+
+
 def test_logger_buffer_is_cleared_between_lambda_invocations(stdout, service_name, monkeypatch, lambda_context):
     # Set initial trace ID for first Lambda invocation
     monkeypatch.setenv(constants.XRAY_TRACE_ID_ENV, "1-67c39786-5908a82a246fb67f3089263f")
 
     # GIVEN A logger configured with specific buffer parameters
-    logger_buffer_config = LoggerBufferConfig(max_size=10240)
-    logger = Logger(level="DEBUG", service=service_name, stream=stdout, logger_buffer=logger_buffer_config)
+    logger_buffer_config = LoggerBufferConfig(max_bytes=10240)
+    logger = Logger(level="DEBUG", service=service_name, stream=stdout, buffer_config=logger_buffer_config)
 
     @logger.inject_lambda_context
     def handler(event, context):
diff --git a/tests/unit/logger/required_dependencies/test_logger_buffer_config.py b/tests/unit/logger/required_dependencies/test_logger_buffer_config.py
index 9567a8ef338..1cb0f1a5f0d 100644
--- a/tests/unit/logger/required_dependencies/test_logger_buffer_config.py
+++ b/tests/unit/logger/required_dependencies/test_logger_buffer_config.py
@@ -8,23 +8,23 @@ def test_default_configuration():
     config_buffer = LoggerBufferConfig()
 
     # THEN default values are default
-    assert config_buffer.max_size == 20480
-    assert config_buffer.minimum_log_level == "DEBUG"
-    assert config_buffer.flush_on_error is True
+    assert config_buffer.max_bytes == 20480
+    assert config_buffer.buffer_at_verbosity == "DEBUG"
+    assert config_buffer.flush_on_error_log is True
 
 
 def test_custom_configuration():
     # GIVEN a new LoggerBufferConfig with custom configuration parameters
     config_buffer = LoggerBufferConfig(
-        max_size=51200,
-        minimum_log_level="WARNING",
-        flush_on_error=False,
+        max_bytes=51200,
+        buffer_at_verbosity="WARNING",
+        flush_on_error_log=False,
     )
 
     # THEN configuration is set with provided values
-    assert config_buffer.max_size == 51200
-    assert config_buffer.minimum_log_level == "WARNING"
-    assert config_buffer.flush_on_error is False
+    assert config_buffer.max_bytes == 51200
+    assert config_buffer.buffer_at_verbosity == "WARNING"
+    assert config_buffer.flush_on_error_log is False
 
 
 def test_invalid_max_size_negative():
@@ -34,7 +34,7 @@ def test_invalid_max_size_negative():
     # WHEN creating a LoggerBufferConfig
     with pytest.raises(ValueError, match="Max size must be a positive integer"):
         # THEN a ValueError is raised
-        LoggerBufferConfig(max_size=invalid_max_size)
+        LoggerBufferConfig(max_bytes=invalid_max_size)
 
 
 def test_invalid_max_size_type():
@@ -44,7 +44,7 @@ def test_invalid_max_size_type():
     # WHEN creating a LoggerBufferConfig
     with pytest.raises(ValueError, match="Max size must be a positive integer"):
         # THEN a ValueError is raised
-        
LoggerBufferConfig(max_size=invalid_max_size) + LoggerBufferConfig(max_bytes=invalid_max_size) def test_invalid_log_level(): @@ -55,7 +55,7 @@ def test_invalid_log_level(): for invalid_log_level in invalid_log_levels: # THEN a ValueError is raised with pytest.raises(ValueError): - LoggerBufferConfig(minimum_log_level=invalid_log_level) + LoggerBufferConfig(buffer_at_verbosity=invalid_log_level) def test_case_insensitive_log_level(): @@ -64,8 +64,8 @@ def test_case_insensitive_log_level(): # WHEN / THEN for log_level in test_cases: - config = LoggerBufferConfig(minimum_log_level=log_level) - assert config.minimum_log_level == log_level.upper() + config = LoggerBufferConfig(buffer_at_verbosity=log_level) + assert config.buffer_at_verbosity == log_level.upper() def test_invalid_flush_on_error(): @@ -75,4 +75,4 @@ def test_invalid_flush_on_error(): # WHEN creating a LoggerBufferConfig / THEN with pytest.raises(ValueError, match="flush_on_error must be a boolean"): # THEN a ValueError is raised - LoggerBufferConfig(flush_on_error=invalid_flush_on_error) + LoggerBufferConfig(flush_on_error_log=invalid_flush_on_error) From a64df933c63bce6c9f7cdc3b2b629827cc8a0365 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 6 Mar 2025 21:39:57 +0000 Subject: [PATCH 39/40] Improve documentation --- docs/core/logger.md | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/docs/core/logger.md b/docs/core/logger.md index 06b0fa50e9c..5d7c7941b72 100644 --- a/docs/core/logger.md +++ b/docs/core/logger.md @@ -519,7 +519,7 @@ The following environment variables are available to configure Logger at a globa Log buffering enables you to buffer logs for a specific request or invocation. Enable log buffering by passing `logger_buffer` when initializing a Logger instance. You can buffer logs at the `WARNING`, `INFO` or `DEBUG` level, and flush them automatically on error or manually as needed. -!!! tip "This is useful when you want to keep logs clean while still providing detailed diagnostic information when needed." +!!! tip "This is useful when you want to reduce the number of log messages emitted while still having detailed logs when needed, such as when troubleshooting issues." === "getting_started_with_buffering_logs.py" @@ -557,7 +557,7 @@ When configuring log buffering, you have options to fine-tune how logs are captu #### Flushing on exceptions -Use the `@logger.inject_lambda_context` decorator to automatically flush buffered logs when an exception is raised in your Lambda function. The `flush_buffer_on_uncaught_error` parameter captures and flush all buffered logs records before the Lambda execution terminates. +Use the `@logger.inject_lambda_context` decorator to automatically flush buffered logs when an exception is raised in your Lambda function. This is done by setting the `flush_buffer_on_uncaught_error` option to `True` in the decorator. === "working_with_buffering_logs_when_raise_exception.py" @@ -594,7 +594,7 @@ If you are using log buffering, we recommend sharing the same log instance acros #### Buffering workflows -##### Flushing manually +##### Manual flush
[mermaid sequence diagram elided: manual buffer flush workflow]

@@ -671,21 +671,23 @@ sequenceDiagram

[mermaid sequence diagram elided]
Flushing buffer when an uncaught exception happens
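Putting the two workflows together, a minimal sketch assuming the parameter names used in this series (`max_bytes`, `buffer_config`, `flush_buffer_on_uncaught_error`); the `force_flush` event key is a hypothetical trigger used only for illustration:

```python
from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

logger = Logger(service="example-service", buffer_config=LoggerBufferConfig(max_bytes=10240))

@logger.inject_lambda_context(flush_buffer_on_uncaught_error=True)
def handler(event, context):
    # Buffered: DEBUG records are held in memory instead of being emitted
    logger.debug("diagnostic detail, only visible once the buffer is flushed")

    if event.get("force_flush"):  # hypothetical trigger, for illustration only
        logger.flush_buffer()  # manual flush: emits buffered records, then clears the buffer

    # Any uncaught exception raised past this point also flushes the buffer,
    # because flush_buffer_on_uncaught_error=True
    return {"status": "ok"}
```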
-#### Common buffering questions
+#### Buffering FAQs
 
-When using log buffering to control log emissions in your AWS Lambda functions, it's important to follow best practices to avoid introducing complexity or unnecessary overhead. Keep these guidelines in mind:
+1. **Does the buffer persist across Lambda invocations?** No, each Lambda invocation has its own buffer. The buffer is created when the Lambda function is invoked and cleared when the execution completes or when it is flushed manually.
 
-1. **How can I prevent log buffering from consuming excessive memory?** Set a `max_size` in `LoggerBufferConfig` to limit the buffer's memory footprint.
+2. **Are my logs buffered during cold starts?** No, we never buffer logs during cold starts, so logs emitted during this phase are always available for debugging and monitoring. The buffer is only used while the Lambda function executes.
 
-2. **What happens if the log buffer reaches its maximum size?** The oldest logs are discarded when the buffer is full, making room for new logs. You need to set an appropriate `max_size` configuration. When this happens, we emit a warning when flushing the buffer to let you know this happened.
+3. **How can I prevent log buffering from consuming excessive memory?** Limit the buffer size by setting the `max_bytes` option in the `LoggerBufferConfig` constructor; this caps the buffer so it cannot grow indefinitely and consume excessive memory.
 
-3. **Can I customize when logs are flushed?** Yes, use the `flush_on_error=True` in `LoggerBufferConfig` or use `flush_buffer_on_uncaught_error` in `@logger.inject_lambda_context` decorator.
+4. **What happens if the log buffer reaches its maximum size?** Older logs are evicted to make room for new ones, so any log that is not flushed before eviction is lost. When this happens, we emit a warning at flush time to indicate that some logs have been dropped.
 
-4. **What timestamp is used when I flush the logs?** The timestamp preserves the original time when the log record was created. If you create a log record at 11:00:10 and flush it at 11:00:25, the log line will retain its original timestamp of 11:00:10.
+5. **What timestamp is used when I flush the logs?** The timestamp preserves the original time when the log record was created. If you create a log record at 11:00:10 and flush it at 11:00:25, the log line will retain its original timestamp of 11:00:10.
 
-5. **What happens if I try to add a log line that is bigger than max buffer size?** It will not buffer, but log as a normal log and emit a warning.
+6. **What happens if I try to add a log line that is bigger than max buffer size?** The record is emitted directly to standard output instead of being buffered, and we emit a warning to indicate that the log line was too big to buffer.
 
-6. **What happens if Lambda times out without flushing the buffer?** Buffer will be lost and no buffered logs will be flushed.
+7. **What happens if Lambda times out without flushing the buffer?** Logs that are still in the buffer are lost.
+
+8. **Do child loggers inherit the buffer?** No, child loggers inherit only the buffer configuration from their parent logger, not the buffer itself. Each child logger keeps its own buffer and does not share it with the parent.
 
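To make items 6 and 8 concrete, a short sketch assuming the APIs introduced in this series; the service name is illustrative:

```python
from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

config = LoggerBufferConfig(max_bytes=1024, buffer_at_verbosity="DEBUG")
parent = Logger(service="orders", buffer_config=config)
child = Logger(service="orders", child=True)  # inherits the configuration, not the buffer

parent.debug("held in the parent's buffer")
child.debug("held in the child's own, separate buffer")

# A record larger than max_bytes bypasses the buffer: it is logged
# directly and a warning is emitted
parent.debug("x" * 2048)

parent.flush_buffer()  # flushes only the records buffered by the parent logger
```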
### Built-in Correlation ID expressions From 5c9eb79c1dfa3d3932d80ec5a9d444008a86a913 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 7 Mar 2025 08:33:17 +0000 Subject: [PATCH 40/40] Improve docstring --- aws_lambda_powertools/logging/logger.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index c57cc600f61..58b3c50ca3e 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -109,6 +109,8 @@ class Logger: custom logging handler e.g. logging.FileHandler("file.log") log_uncaught_exceptions: bool, by default False logs uncaught exception using sys.excepthook + buffer_config: LoggerBufferConfig, optional + logger buffer configuration See: https://docs.python.org/3/library/sys.html#sys.excepthook
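For completeness, a sketch of how the newly documented parameter is passed, spelling out the defaults asserted in the config tests above (20480 bytes, `DEBUG` verbosity, flush on error log); the service name is illustrative:

```python
from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

# Explicitly passing the defaults asserted in test_default_configuration
logger = Logger(
    service="example",
    buffer_config=LoggerBufferConfig(
        max_bytes=20480,
        buffer_at_verbosity="DEBUG",
        flush_on_error_log=True,
    ),
)
```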