diff --git a/README.md b/README.md
index 175e8b98470..9a1245bfa80 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # Lambda Powertools
 
-![PackageStatus](https://img.shields.io/static/v1?label=status&message=beta&color=blueviolet?style=flat-square) ![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8&color=blue?style=flat-square&logo=python)
+![Python Build](https://github.com/awslabs/aws-lambda-powertools/workflows/Powertools%20Python/badge.svg?branch=master)
 
 A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, structured logging and creating custom metrics asynchronously easier.
 
diff --git a/python/HISTORY.md b/python/HISTORY.md
index 9d9d296c1c2..8001c9bba2c 100644
--- a/python/HISTORY.md
+++ b/python/HISTORY.md
@@ -1,5 +1,11 @@
 # HISTORY
 
+## April 24th, 2020
+
+* Introduces `Logger` for structured logging as a replacement for `logger_setup`
+* Introduces `Logger.inject_lambda_context` decorator as a replacement for `logger_inject_lambda_context`
+* Raises `DeprecationWarning` exception for both `logger_setup` and `logger_inject_lambda_context`
+
 ## April 20th, 2020
 
 **0.7.0**
diff --git a/python/README.md b/python/README.md
index 5a96dc80b63..ff22f799077 100644
--- a/python/README.md
+++ b/python/README.md
@@ -13,7 +13,7 @@ A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray,
 > It currently uses AWS X-Ray
 
 * Decorators that capture cold start as annotation, and response and exceptions as metadata
-* Run functions locally without code change to disable tracing
+* Run functions locally with SAM CLI without code change to disable tracing
 * Explicitly disable tracing via env var `POWERTOOLS_TRACE_DISABLED="true"`
 
 **Logging**
@@ -24,6 +24,7 @@ A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray,
 * Logs canonical custom metric line to logs that can be consumed asynchronously
 * Log sampling enables DEBUG log level for a percentage of requests (disabled by default)
     - Enable via `POWERTOOLS_LOGGER_SAMPLE_RATE=0.1`, ranges from 0 to 1, where 0.1 is 10% and 1 is 100%
+* Append additional keys to structured logs at any point in time so they're available across log statements
 
 **Metrics**
 
@@ -113,6 +114,8 @@ tracer = Tracer(auto_patch=False) # new instance using existing configuration wi
 
 ### Logging
 
+> **NOTE** `logger_setup` and `logger_inject_lambda_context` are deprecated and will be completely removed once this library reaches GA.
+
 **Example SAM template using supported environment variables**
 
 ```yaml
@@ -128,13 +131,12 @@ Globals:
 **Pseudo Python Lambda code**
 
 ```python
-from aws_lambda_powertools.logging import logger_setup, logger_inject_lambda_context
+from aws_lambda_powertools.logging import Logger
 
-logger = logger_setup()
-# logger_setup(service="payment") # also accept explicit service name
-# logger_setup(level="INFO") # also accept explicit log level
+logger = Logger()
+# Logger(service="payment", level="INFO") # also accepts explicit service name, log level
 
-@logger_inject_lambda_context
+@logger.inject_lambda_context
 def handler(event, context)
   logger.info("Collecting payment")
   ...
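The hunk above replaces `logger_setup` with the new `Logger` class. A minimal sketch of how the new interface can be exercised outside Lambda, assuming the 0.8.0 API introduced in this diff (the in-memory stream mirrors what the functional tests further down do; the `payment` service name is illustrative):

```python
import io
import json

from aws_lambda_powertools.logging import Logger

# Point the logger at an in-memory stream instead of stdout to inspect output
buffer = io.StringIO()
logger = Logger(service="payment", level="INFO", stream=buffer)

logger.info("Collecting payment")

# Each record is emitted as a single JSON line with the default keys appended
record = json.loads(buffer.getvalue())
assert record["service"] == "payment"
assert record["message"] == "Collecting payment"
```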
@@ -159,6 +161,7 @@ def handler(event, context)
   "lambda_function_arn":"arn:aws:lambda:eu-west-1:12345678910:function:test",
   "lambda_request_id":"52fdfc07-2182-154f-163f-5f0f9a621d72",
   "cold_start": "true",
+  "sampling_rate": 0.1,
   "message": "Collecting payment"
 }
 
@@ -172,6 +175,7 @@ def handler(event, context)
   "lambda_function_arn":"arn:aws:lambda:eu-west-1:12345678910:function:test",
   "lambda_request_id":"52fdfc07-2182-154f-163f-5f0f9a621d72",
   "cold_start": "true",
+  "sampling_rate": 0.1,
   "message":{
     "operation":"collect_payment",
     "charge_id": "ch_AZFlk2345C0"
@@ -179,6 +183,40 @@ def handler(event, context)
 }
 ```
 
+**Append additional keys to structured log**
+
+```python
+from aws_lambda_powertools.logging import Logger
+
+logger = Logger()
+
+@logger.inject_lambda_context
+def handler(event, context):
+    if "order_id" in event:
+        logger.structure_logs(append=True, order_id=event["order_id"])
+    logger.info("Collecting payment")
+    ...
+```
+
+**Excerpt of output in CloudWatch Logs**
+
+```json
+{
+  "timestamp":"2019-08-22 18:17:33,774",
+  "level":"INFO",
+  "location":"collect.handler:1",
+  "service":"payment",
+  "lambda_function_name":"test",
+  "lambda_function_memory_size":"128",
+  "lambda_function_arn":"arn:aws:lambda:eu-west-1:12345678910:function:test",
+  "lambda_request_id":"52fdfc07-2182-154f-163f-5f0f9a621d72",
+  "cold_start": "true",
+  "sampling_rate": 0.1,
+  "order_id": "order_id_value",
+  "message": "Collecting payment"
+}
+```
+
 ### Custom Metrics async
 
 > **NOTE** `log_metric` will be removed once it's GA.
@@ -229,6 +267,7 @@ with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1) as metric:
     metric.add_dimension(name="function_context", value="$LATEST")
 ```
 
+> **NOTE**: If you want to instantiate Metrics() in multiple places in your code, make sure to set the `POWERTOOLS_METRICS_NAMESPACE` env var, as we don't keep a copy of the namespace across instances.
 
 ### Utilities
 
@@ -320,12 +359,19 @@ def lambda_handler(event, context):
     return True
 ```
 
-## Beta
-> **[Progress towards GA](https://github.com/awslabs/aws-lambda-powertools/projects/1)**
+### Debug mode
+
+By default, all debug log statements from the AWS Lambda Powertools package are suppressed. If you'd like to enable them, use the `set_package_logger` utility:
+
+```python
+import aws_lambda_powertools
+aws_lambda_powertools.logging.logger.set_package_logger()
+...
+```
+
+## Beta
 
-This library may change its API/methods or environment variables as it receives feedback from customers. Currently looking for ideas in the following areas before making it stable:
+This library may change its API/methods or environment variables as it receives feedback from customers.
 
-* **Should Tracer patch all possible imported libraries by default or only AWS SDKs?**
-    - Patching all libraries may have a small performance penalty (~50ms) at cold start
-    - Alternatively, we could patch only AWS SDK if available and to provide a param to patch multiple `Tracer(modules=("boto3", "requests"))`
+**[Progress towards GA](https://github.com/awslabs/aws-lambda-powertools/projects/1)**
diff --git a/python/aws_lambda_powertools/__init__.py b/python/aws_lambda_powertools/__init__.py
index b048ddfe742..2be705fb386 100644
--- a/python/aws_lambda_powertools/__init__.py
+++ b/python/aws_lambda_powertools/__init__.py
@@ -1,5 +1,10 @@
 # -*- coding: utf-8 -*-
 """Top-level package for Lambda Python Powertools."""
+import logging
 
 __author__ = """Amazon Web Services"""
+
+logger = logging.getLogger("aws_lambda_powertools")
+logger.addHandler(logging.NullHandler())
+logger.propagate = False
diff --git a/python/aws_lambda_powertools/logging/__init__.py b/python/aws_lambda_powertools/logging/__init__.py
index 855abba4635..4c1bb2ec5c6 100644
--- a/python/aws_lambda_powertools/logging/__init__.py
+++ b/python/aws_lambda_powertools/logging/__init__.py
@@ -1,6 +1,6 @@
 """Logging utility
 """
 from ..helper.models import MetricUnit
-from .logger import log_metric, logger_inject_lambda_context, logger_setup
+from .logger import Logger, log_metric, logger_inject_lambda_context, logger_setup
 
-__all__ = ["logger_setup", "logger_inject_lambda_context", "log_metric", "MetricUnit"]
+__all__ = ["logger_setup", "logger_inject_lambda_context", "log_metric", "MetricUnit", "Logger"]
diff --git a/python/aws_lambda_powertools/logging/aws_lambda_logging.py b/python/aws_lambda_powertools/logging/aws_lambda_logging.py
deleted file mode 100644
index 1b42ec3c707..00000000000
--- a/python/aws_lambda_powertools/logging/aws_lambda_logging.py
+++ /dev/null
@@ -1,98 +0,0 @@
-"""Microlibrary to simplify logging in AWS Lambda.
-Originally taken from https://gitlab.com/hadrien/aws_lambda_logging/
-"""
-import json
-import logging
-
-
-def json_formatter(obj):
-    """Formatter for unserialisable values."""
-    return str(obj)
-
-
-class JsonFormatter(logging.Formatter):
-    """AWS Lambda Logging formatter.
-
-    Formats the log message as a JSON encoded string. If the message is a
-    dict it will be used directly. If the message can be parsed as JSON, then
-    the parse d value is used in the output record.
-    """
-
-    def __init__(self, **kwargs):
-        """Return a JsonFormatter instance.
-
-        The `json_default` kwarg is used to specify a formatter for otherwise
-        unserialisable values. It must not throw. Defaults to a function that
-        coerces the value to a string.
-
-        Other kwargs are used to specify log field format strings.
- """ - datefmt = kwargs.pop("datefmt", None) - - super(JsonFormatter, self).__init__(datefmt=datefmt) - self.format_dict = { - "timestamp": "%(asctime)s", - "level": "%(levelname)s", - "location": "%(name)s.%(funcName)s:%(lineno)d", - } - self.format_dict.update(kwargs) - self.default_json_formatter = kwargs.pop("json_default", json_formatter) - - def format(self, record): # noqa: A003 - record_dict = record.__dict__.copy() - record_dict["asctime"] = self.formatTime(record, self.datefmt) - - log_dict = {k: v % record_dict for k, v in self.format_dict.items() if v} - - if isinstance(record_dict["msg"], dict): - log_dict["message"] = record_dict["msg"] - else: - log_dict["message"] = record.getMessage() - - # Attempt to decode the message as JSON, if so, merge it with the - # overall message for clarity. - try: - log_dict["message"] = json.loads(log_dict["message"]) - except (TypeError, ValueError): - pass - - if record.exc_info: - # Cache the traceback text to avoid converting it multiple times - # (it's constant anyway) - # from logging.Formatter:format - if not record.exc_text: - record.exc_text = self.formatException(record.exc_info) - - if record.exc_text: - log_dict["exception"] = record.exc_text - - json_record = json.dumps(log_dict, default=self.default_json_formatter) - - if hasattr(json_record, "decode"): # pragma: no cover - json_record = json_record.decode("utf-8") - - return json_record - - -def setup(level="DEBUG", formatter_cls=JsonFormatter, boto_level=None, **kwargs): - """Overall Metadata Formatting.""" - if formatter_cls: # pragma: no cover - for handler in logging.root.handlers: - handler.setFormatter(formatter_cls(**kwargs)) - - try: - logging.root.setLevel(level) - except ValueError: - logging.root.error("Invalid log level: %s", level) - level = "INFO" - logging.root.setLevel(level) - - if not boto_level: - boto_level = level - - try: # pragma: no cover - logging.getLogger("boto").setLevel(boto_level) - logging.getLogger("boto3").setLevel(boto_level) - logging.getLogger("botocore").setLevel(boto_level) - except ValueError: # pragma: no cover - logging.root.error("Invalid log level: %s", boto_level) diff --git a/python/aws_lambda_powertools/logging/exceptions.py b/python/aws_lambda_powertools/logging/exceptions.py new file mode 100644 index 00000000000..65b30906edf --- /dev/null +++ b/python/aws_lambda_powertools/logging/exceptions.py @@ -0,0 +1,2 @@ +class InvalidLoggerSamplingRateError(Exception): + pass diff --git a/python/aws_lambda_powertools/logging/logger.py b/python/aws_lambda_powertools/logging/logger.py index 32dc1be6c1a..02c0e912b12 100644 --- a/python/aws_lambda_powertools/logging/logger.py +++ b/python/aws_lambda_powertools/logging/logger.py @@ -1,160 +1,155 @@ +import copy import functools import itertools +import json import logging import os import random +import sys import warnings from distutils.util import strtobool -from typing import Any, Callable, Dict +from typing import Any, Callable, Dict, Union from ..helper.models import MetricUnit, build_lambda_context_model, build_metric_unit_from_str -from . import aws_lambda_logging +from .exceptions import InvalidLoggerSamplingRateError logger = logging.getLogger(__name__) -logger.setLevel(os.getenv("LOG_LEVEL", "INFO")) is_cold_start = True -def logger_setup(service: str = "service_undefined", level: str = "INFO", sampling_rate: float = 0.0, **kwargs): - """Setups root logger to format statements in JSON. 
+def json_formatter(unserialized_value: Any):
+    """JSON custom serializer to cast unserializable values to strings.
 
-    Includes service name and any additional key=value into logs
-    It also accepts both service name or level explicitly via env vars
+    Example
+    -------
 
-    Environment variables
-    ---------------------
-    POWERTOOLS_SERVICE_NAME : str
-        service name
-    LOG_LEVEL: str
-        logging level (e.g. INFO, DEBUG)
-    POWERTOOLS_LOGGER_SAMPLE_RATE: float
-        samping rate ranging from 0 to 1, 1 being 100% sampling
+    **Serialize unserializable value to string**
+
+    class X: pass
+    value = {"x": X()}
+
+    json.dumps(value, default=json_formatter)
 
     Parameters
     ----------
-    service : str, optional
-        service name to be appended in logs, by default "service_undefined"
-    level : str, optional
-        logging.level, by default "INFO"
-    sample_rate: float, optional
-        sample rate for debug calls within execution context defaults to 0
+    unserialized_value: Any
+        Python object unserializable by JSON
+    """
+    return str(unserialized_value)
 
-    Example
-    -------
-    Setups structured logging in JSON for Lambda functions with explicit service name
 
-    >>> from aws_lambda_powertools.logging import logger_setup
-    >>> logger = logger_setup(service="payment")
-    >>>
-    >>> def handler(event, context):
-            logger.info("Hello")
+class JsonFormatter(logging.Formatter):
+    """AWS Lambda Logging formatter.
 
-    Setups structured logging in JSON for Lambda functions using env vars
+    Formats the log message as a JSON encoded string. If the message is a
+    dict it will be used directly. If the message can be parsed as JSON, then
+    the parsed value is used in the output record.
 
-    $ export POWERTOOLS_SERVICE_NAME="payment"
-    $ export POWERTOOLS_LOGGER_SAMPLE_RATE=0.01 # 1% debug sampling
-    >>> from aws_lambda_powertools.logging import logger_setup
-    >>> logger = logger_setup()
-    >>>
-    >>> def handler(event, context):
-            logger.info("Hello")
+    Originally taken from https://gitlab.com/hadrien/aws_lambda_logging/
     """
-    service = os.getenv("POWERTOOLS_SERVICE_NAME") or service
-    sampling_rate = os.getenv("POWERTOOLS_LOGGER_SAMPLE_RATE") or sampling_rate
-    log_level = os.getenv("LOG_LEVEL") or level
-    logger = logging.getLogger(name=service)
-
-    try:
-        if sampling_rate and random.random() <= float(sampling_rate):
-            log_level = logging.DEBUG
-    except ValueError:
-        raise ValueError(
-            "fExpected a float value ranging 0 to 1, but received {sampling_rate} instead. Please review POWERTOOLS_LOGGER_SAMPLE_RATE environment variable."  # noqa E501
-        )
 
-    logger.setLevel(log_level)
+    def __init__(self, **kwargs):
+        """Return a JsonFormatter instance.
 
-    # Patch logger by structuring its outputs as JSON
-    aws_lambda_logging.setup(level=log_level, service=service, sampling_rate=sampling_rate, **kwargs)
+        The `json_default` kwarg is used to specify a formatter for otherwise
+        unserializable values. It must not throw. Defaults to a function that
+        coerces the value to a string.
 
-    return logger
+        Other kwargs are used to specify log field format strings.
+ """ + datefmt = kwargs.pop("datefmt", None) + super(JsonFormatter, self).__init__(datefmt=datefmt) + self.format_dict = { + "timestamp": "%(asctime)s", + "level": "%(levelname)s", + "location": "%(funcName)s:%(lineno)d", + } + self.format_dict.update(kwargs) + self.default_json_formatter = kwargs.pop("json_default", json_formatter) -def logger_inject_lambda_context(lambda_handler: Callable[[Dict, Any], Any] = None, log_event: bool = False): - """Decorator to capture Lambda contextual info and inject into struct logging + def format(self, record): # noqa: A003 + record_dict = record.__dict__.copy() + record_dict["asctime"] = self.formatTime(record, self.datefmt) - Parameters - ---------- - log_event : bool, optional - Instructs logger to log Lambda Event, by default False + log_dict = {} + for key, value in self.format_dict.items(): + if value: + # converts default logging expr to its record value + # e.g. '%(asctime)s' to '2020-04-24 09:35:40,698' + log_dict[key] = value % record_dict - Environment variables - --------------------- - POWERTOOLS_LOGGER_LOG_EVENT : str - instruct logger to log Lambda Event (e.g. `"true", "True", "TRUE"`) + if isinstance(record_dict["msg"], dict): + log_dict["message"] = record_dict["msg"] + else: + log_dict["message"] = record.getMessage() - Example - ------- - **Captures Lambda contextual runtime info (e.g memory, arn, req_id)** + # Attempt to decode the message as JSON, if so, merge it with the + # overall message for clarity. + try: + log_dict["message"] = json.loads(log_dict["message"]) + except (json.decoder.JSONDecodeError, TypeError, ValueError): + pass - from aws_lambda_powertools.logging import logger_setup, logger_inject_lambda_context - import logging + if record.exc_info: + # Cache the traceback text to avoid converting it multiple times + # (it's constant anyway) + # from logging.Formatter:format + if not record.exc_text: + record.exc_text = self.formatException(record.exc_info) - logger = logging.getLogger(__name__) - logging.setLevel(logging.INFO) - logger_setup() + if record.exc_text: + log_dict["exception"] = record.exc_text - @logger_inject_lambda_context - def handler(event, context): - logger.info("Hello") + json_record = json.dumps(log_dict, default=self.default_json_formatter) - **Captures Lambda contextual runtime info and logs incoming request** + if hasattr(json_record, "decode"): # pragma: no cover + json_record = json_record.decode("utf-8") - from aws_lambda_powertools.logging import logger_setup, logger_inject_lambda_context - import logging + return json_record - logger = logging.getLogger(__name__) - logging.setLevel(logging.INFO) - logger_setup() - @logger_inject_lambda_context(log_event=True) - def handler(event, context): - logger.info("Hello") +def logger_setup( + service: str = None, level: str = None, sampling_rate: float = 0.0, legacy: bool = False, **kwargs +) -> DeprecationWarning: + """DEPRECATED - Returns + This will be removed when GA - Use `aws_lambda_powertools.logging.logger.Logger` instead + + Example ------- - decorate : Callable - Decorated lambda handler - """ + **Logger class - Same UX** - # If handler is None we've been called with parameters - # Return a partial function with args filled - if lambda_handler is None: - logger.debug("Decorator called with parameters") - return functools.partial(logger_inject_lambda_context, log_event=log_event) + from aws_lambda_powertools.logging import Logger + logger = Logger(service="payment") # same env var still applies - log_event_env_option = 
str(os.getenv("POWERTOOLS_LOGGER_LOG_EVENT", "false")) - log_event = strtobool(log_event_env_option) or log_event + """ + raise DeprecationWarning("Use Logger instead - This method will be removed when GA") - @functools.wraps(lambda_handler) - def decorate(event, context): - if log_event: - logger.debug("Event received") - logger.info(event) - lambda_context = build_lambda_context_model(context) - cold_start = __is_cold_start() +def logger_inject_lambda_context( + lambda_handler: Callable[[Dict, Any], Any] = None, log_event: bool = False +) -> DeprecationWarning: + """DEPRECATED - logger_setup(cold_start=cold_start, **lambda_context.__dict__) + This will be removed when GA - Use `aws_lambda_powertools.logging.logger.Logger` instead - return lambda_handler(event, context) + Example + ------- + **Logger class - Same UX** - return decorate + from aws_lambda_powertools.logging import Logger + logger = Logger(service="payment") # same env var still applies + @logger.inject_lambda_context + def handler(evt, ctx): + pass + """ + raise DeprecationWarning("Use Logger instead - This method will be removed when GA") -def __is_cold_start() -> str: +def _is_cold_start() -> str: """Verifies whether is cold start and return a string used for struct logging Returns @@ -278,3 +273,231 @@ def __build_dimensions(**dimensions) -> str: dimension = ",".join(dimensions_list) return dimension + + +class Logger(logging.Logger): + """Creates and setups a logger to format statements in JSON. + + Includes service name and any additional key=value into logs + It also accepts both service name or level explicitly via env vars + + Environment variables + --------------------- + POWERTOOLS_SERVICE_NAME : str + service name + LOG_LEVEL: str, int + logging level (e.g. INFO, DEBUG) + POWERTOOLS_LOGGER_SAMPLE_RATE: float + samping rate ranging from 0 to 1, 1 being 100% sampling + + Parameters + ---------- + service : str, optional + service name to be appended in logs, by default "service_undefined" + level : str, optional + logging.level, by default "INFO" + sample_rate: float, optional + sample rate for debug calls within execution context defaults to 0.0 + stream: sys.stdout, optional + valid output for a logging stream, by default sys.stdout + + Example + ------- + **Setups structured logging in JSON for Lambda functions with explicit service name** + + >>> from aws_lambda_powertools.logging import Logger + >>> logger = Logger(service="payment") + >>> + >>> def handler(event, context): + logger.info("Hello") + + **Setups structured logging in JSON for Lambda functions using env vars** + + $ export POWERTOOLS_SERVICE_NAME="payment" + $ export POWERTOOLS_LOGGER_SAMPLE_RATE=0.01 # 1% debug sampling + >>> from aws_lambda_powertools.logging import Logger + >>> logger = Logger() + >>> + >>> def handler(event, context): + logger.info("Hello") + + **Append payment_id to previously setup structured log logger** + + >>> from aws_lambda_powertools.logging import Logger + >>> logger = Logger(service="payment") + >>> + >>> def handler(event, context): + logger.structure_logs(append=True, payment_id=event["payment_id"]) + logger.info("Hello") + + Parameters + ---------- + logging : logging.Logger + Inherits Logger + service: str + name of the service to create the logger for, "service_undefined" by default + level: str, int + log level, INFO by default + sampling_rate: float + debug log sampling rate, 0.0 by default + stream: sys.stdout + log stream, stdout by default + + Raises + ------ + InvalidLoggerSamplingRateError + When 
sampling rate provided is not a float + """ + + def __init__( + self, + service: str = None, + level: Union[str, int] = None, + sampling_rate: float = None, + stream: sys.stdout = None, + **kwargs, + ): + self.service = service or os.getenv("POWERTOOLS_SERVICE_NAME") or "service_undefined" + self.sampling_rate = sampling_rate or os.getenv("POWERTOOLS_LOGGER_SAMPLE_RATE") or 0.0 + self.log_level = level or os.getenv("LOG_LEVEL") or logging.INFO + self.handler = logging.StreamHandler(stream) if stream is not None else logging.StreamHandler(sys.stdout) + self._default_log_keys = {"service": self.service, "sampling_rate": self.sampling_rate} + self.log_keys = copy.copy(self._default_log_keys) + + super().__init__(name=self.service, level=self.log_level) + + try: + if self.sampling_rate and random.random() <= float(self.sampling_rate): + logger.debug("Setting log level to Debug due to sampling rate") + self.log_level = logging.DEBUG + except ValueError: + raise InvalidLoggerSamplingRateError( + f"Expected a float value ranging 0 to 1, but received {self.sampling_rate} instead. Please review POWERTOOLS_LOGGER_SAMPLE_RATE environment variable." # noqa E501 + ) + + self.setLevel(self.log_level) + self.structure_logs(**kwargs) + self.addHandler(self.handler) + + def inject_lambda_context(self, lambda_handler: Callable[[Dict, Any], Any] = None, log_event: bool = False): + """Decorator to capture Lambda contextual info and inject into struct logging + + Parameters + ---------- + log_event : bool, optional + Instructs logger to log Lambda Event, by default False + + Environment variables + --------------------- + POWERTOOLS_LOGGER_LOG_EVENT : str + instruct logger to log Lambda Event (e.g. `"true", "True", "TRUE"`) + + Example + ------- + **Captures Lambda contextual runtime info (e.g memory, arn, req_id)** + + from aws_lambda_powertools.logging import Logger + + logger = Logger(service="payment") + + @logger.inject_lambda_context + def handler(event, context): + logger.info("Hello") + + **Captures Lambda contextual runtime info and logs incoming request** + + from aws_lambda_powertools.logging import Logger + + logger = Logger(service="payment") + + @logger.inject_lambda_context(log_event=True) + def handler(event, context): + logger.info("Hello") + + Returns + ------- + decorate : Callable + Decorated lambda handler + """ + + # If handler is None we've been called with parameters + # Return a partial function with args filled + if lambda_handler is None: + logger.debug("Decorator called with parameters") + return functools.partial(self.inject_lambda_context, log_event=log_event) + + log_event_env_option = str(os.getenv("POWERTOOLS_LOGGER_LOG_EVENT", "false")) + log_event = strtobool(log_event_env_option) or log_event + + @functools.wraps(lambda_handler) + def decorate(event, context): + if log_event: + logger.debug("Event received") + self.info(event) + + lambda_context = build_lambda_context_model(context) + cold_start = _is_cold_start() + + self.structure_logs(cold_start=cold_start, **lambda_context.__dict__) + return lambda_handler(event, context) + + return decorate + + def structure_logs(self, append: bool = False, **kwargs): + """Sets logging formatting to JSON. + + Optionally, it can append keyword arguments + to an existing logger so it is available + across future log statements. + + Last keyword argument and value wins if duplicated. 
+
+        Parameters
+        ----------
+        append : bool, optional
+            whether to append keys provided to existing structured logs, False by default
+        """
+        self.handler.setFormatter(JsonFormatter(**self._default_log_keys, **kwargs))
+
+        if append:
+            new_keys = {**self.log_keys, **kwargs}
+            self.handler.setFormatter(JsonFormatter(**new_keys))
+
+        self.log_keys.update(**kwargs)
+
+
+def set_package_logger(
+    level: Union[str, int] = logging.DEBUG, stream: sys.stdout = None, formatter: logging.Formatter = None
+):
+    """Set an additional stream handler, formatter, and log level for aws_lambda_powertools package logger.
+
+    **Package log is suppressed by default (NullHandler); this should only be used for debugging.
+    This is separate from application Logger class utility**
+
+    Example
+    -------
+    **Enables debug logging for AWS Lambda Powertools package**
+
+    >>> from aws_lambda_powertools.logging.logger import set_package_logger
+    >>> set_package_logger()
+
+    Parameters
+    ----------
+    level: str, int
+        log level, DEBUG by default
+    stream: sys.stdout
+        log stream, stdout by default
+    formatter: logging.Formatter
+        log formatter, "%(asctime)s %(name)s [%(levelname)s] %(message)s" by default
+    """
+    if formatter is None:
+        formatter = logging.Formatter("%(asctime)s %(name)s [%(levelname)s] %(message)s")
+
+    if stream is None:
+        stream = sys.stdout
+
+    logger = logging.getLogger("aws_lambda_powertools")
+    logger.setLevel(level)
+    handler = logging.StreamHandler(stream)
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
diff --git a/python/aws_lambda_powertools/metrics/base.py b/python/aws_lambda_powertools/metrics/base.py
index 448bfc37e02..8d561abf71d 100644
--- a/python/aws_lambda_powertools/metrics/base.py
+++ b/python/aws_lambda_powertools/metrics/base.py
@@ -13,12 +13,13 @@
 from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError, UniqueNamespaceError
 
 logger = logging.getLogger(__name__)
-logger.setLevel(os.getenv("LOG_LEVEL", "INFO"))
 
 _schema_path = pathlib.Path(__file__).parent / "./schema.json"
 with _schema_path.open() as f:
     CLOUDWATCH_EMF_SCHEMA = json.load(f)
 
+MAX_METRICS = 100
+
 
 class MetricManager:
     """Base class for metric functionality (namespace, metric, dimension, serialization)
@@ -101,12 +102,6 @@ def add_metric(self, name: str, unit: MetricUnit, value: Union[float, int]):
         MetricUnitError
             When metric unit is not supported by CloudWatch
         """
-        if len(self.metric_set) == 100:
-            logger.debug("Exceeded maximum of 100 metrics - Publishing existing metric set")
-            metrics = self.serialize_metric_set()
-            print(json.dumps(metrics))
-            self.metric_set = {}
-
         if not isinstance(value, numbers.Number):
             raise MetricValueError(f"{value} is not a valid number")
 
@@ -121,6 +116,12 @@ def add_metric(self, name: str, unit: MetricUnit, value: Union[float, int]):
         logger.debug(f"Adding metric: {name} with {metric}")
         self.metric_set[name] = metric
 
+        if len(self.metric_set) == MAX_METRICS:
+            logger.debug(f"Exceeded maximum of {MAX_METRICS} metrics - Publishing existing metric set")
+            metrics = self.serialize_metric_set()
+            print(json.dumps(metrics))
+            self.metric_set = {}
+
     def serialize_metric_set(self, metrics: Dict = None, dimensions: Dict = None) -> Dict:
         """Serializes metric and dimensions set
 
@@ -149,10 +150,10 @@ def serialize_metric_set(self, metrics: Dict = None, dimensions: Dict = None) ->
         SchemaValidationError
             Raised when serialization fail schema validation
         """
-        if metrics is None:
+        if metrics is None:  # pragma: no cover
             metrics = self.metric_set
 
-        if dimensions is None:
+        if dimensions is None:  # pragma: no cover
dimensions = self.dimension_set logger.debug("Serializing...", {"metrics": metrics, "dimensions": dimensions}) @@ -164,11 +165,10 @@ def serialize_metric_set(self, metrics: Dict = None, dimensions: Dict = None) -> for metric_name in metrics: metric: str = metrics[metric_name] metric_value: int = metric.get("Value", 0) - metric_unit: str = metric.get("Unit") + metric_unit: str = metric.get("Unit", "") - if metric_value > 0 and metric_unit is not None: - metric_names_unit.append({"Name": metric_name, "Unit": metric["Unit"]}) - metric_set.update({metric_name: metric["Value"]}) + metric_names_unit.append({"Name": metric_name, "Unit": metric_unit}) + metric_set.update({metric_name: metric_value}) metrics_definition = { "CloudWatchMetrics": [ diff --git a/python/aws_lambda_powertools/metrics/metric.py b/python/aws_lambda_powertools/metrics/metric.py index 85ce527e576..05a8d4ce76b 100644 --- a/python/aws_lambda_powertools/metrics/metric.py +++ b/python/aws_lambda_powertools/metrics/metric.py @@ -1,6 +1,5 @@ import json import logging -import os from contextlib import contextmanager from typing import Dict @@ -8,7 +7,6 @@ from aws_lambda_powertools.metrics.base import MetricManager logger = logging.getLogger(__name__) -logger.setLevel(os.getenv("LOG_LEVEL", "INFO")) class SingleMetric(MetricManager): @@ -113,8 +111,6 @@ def single_metric(name: str, unit: MetricUnit, value: float): yield metric logger.debug("Serializing single metric") metric_set: Dict = metric.serialize_metric_set() - except Exception as e: - raise e finally: logger.debug("Publishing single metric", {"metric": metric}) print(json.dumps(metric_set)) diff --git a/python/aws_lambda_powertools/metrics/metrics.py b/python/aws_lambda_powertools/metrics/metrics.py index 24d8f2b93a1..390356b1461 100644 --- a/python/aws_lambda_powertools/metrics/metrics.py +++ b/python/aws_lambda_powertools/metrics/metrics.py @@ -1,13 +1,11 @@ import functools import json import logging -import os from typing import Any, Callable from aws_lambda_powertools.metrics.base import MetricManager logger = logging.getLogger(__name__) -logger.setLevel(os.getenv("LOG_LEVEL", "INFO")) class Metrics(MetricManager): @@ -101,8 +99,6 @@ def handler(event, context) def decorate(*args, **kwargs): try: response = lambda_handler(*args, **kwargs) - except Exception as e: - raise e finally: metrics = self.serialize_metric_set() logger.debug("Publishing metrics", {"metrics": metrics}) diff --git a/python/aws_lambda_powertools/middleware_factory/exceptions.py b/python/aws_lambda_powertools/middleware_factory/exceptions.py new file mode 100644 index 00000000000..55d5b2342bb --- /dev/null +++ b/python/aws_lambda_powertools/middleware_factory/exceptions.py @@ -0,0 +1,2 @@ +class MiddlewareInvalidArgumentError(Exception): + pass diff --git a/python/aws_lambda_powertools/middleware_factory/factory.py b/python/aws_lambda_powertools/middleware_factory/factory.py index 4dcab2adf33..43c8e5ad9fa 100644 --- a/python/aws_lambda_powertools/middleware_factory/factory.py +++ b/python/aws_lambda_powertools/middleware_factory/factory.py @@ -6,9 +6,9 @@ from typing import Callable from ..tracing import Tracer +from .exceptions import MiddlewareInvalidArgumentError logger = logging.getLogger(__name__) -logger.setLevel(os.getenv("LOG_LEVEL", "INFO")) def lambda_handler_decorator(decorator: Callable = None, trace_execution=False): @@ -97,7 +97,7 @@ def lambda_handler(event, context): Raises ------ - TypeError + MiddlewareInvalidArgumentError When middleware receives non keyword=arguments """ @@ 
-113,7 +113,8 @@ def final_decorator(func: Callable = None, **kwargs):
         return functools.partial(final_decorator, **kwargs)
 
     if not inspect.isfunction(func):
-        raise TypeError(
+        # @custom_middleware(True) vs @custom_middleware(log_event=True)
+        raise MiddlewareInvalidArgumentError(
             f"Only keyword arguments is supported for middlewares: {decorator.__qualname__} received {func}"
         )
 
@@ -129,9 +130,9 @@ def wrapper(event, context):
             else:
                 response = middleware()
             return response
-        except Exception as err:
-            logger.error(f"Caught exception in {decorator.__qualname__}")
-            raise err
+        except Exception:
+            logger.exception(f"Caught exception in {decorator.__qualname__}")
+            raise
 
     return wrapper
diff --git a/python/aws_lambda_powertools/tracing/tracer.py b/python/aws_lambda_powertools/tracing/tracer.py
index 0f3e3cff8bb..19cc319492f 100644
--- a/python/aws_lambda_powertools/tracing/tracer.py
+++ b/python/aws_lambda_powertools/tracing/tracer.py
@@ -9,7 +9,6 @@
 is_cold_start = True
 logger = logging.getLogger(__name__)
-logger.setLevel(os.getenv("LOG_LEVEL", "INFO"))
 
 
 class Tracer:
@@ -175,9 +174,9 @@ def decorate(event, context):
                 if response:
                     self.put_metadata("lambda handler response", response)
             except Exception as err:
-                logger.debug("Exception received from lambda handler")
+                logger.exception("Exception received from lambda handler", exc_info=True)
                 self.put_metadata(f"{self.service}_error", err)
-                raise err
+                raise
             finally:
                 self.end_subsegment()
 
@@ -223,9 +222,9 @@ def decorate(*args, **kwargs):
                 if response is not None:
                     self.put_metadata(f"{method_name} response", response)
             except Exception as err:
-                logger.debug(f"Exception received from '{method_name}'' method")
+                logger.exception(f"Exception received from '{method_name}' method", exc_info=True)
                 self.put_metadata(f"{method_name} error", err)
-                raise err
+                raise
             finally:
                 self.end_subsegment()
 
@@ -344,17 +343,10 @@ def patch(self):
         """Patch modules for instrumentation"""
         logger.debug("Patching modules...")
 
-        is_lambda_emulator = os.getenv("AWS_SAM_LOCAL", False)
-        is_lambda_env = os.getenv("LAMBDA_TASK_ROOT", False)
-
         if self.disabled:
             logger.debug("Tracing has been disabled, aborting patch")
             return
 
-        if is_lambda_emulator or is_lambda_env:
-            logger.debug("Running under SAM CLI env or not in Lambda; aborting patch")
-            return
-
         patch_all()  # pragma: no cover
 
     def __disable_tracing_provider(self):
@@ -369,7 +361,7 @@ def __is_trace_disabled(self) -> bool:
         Tracing is automatically disabled in the following conditions:
 
         1. Explicitly disabled via `TRACE_DISABLED` environment variable
-        2. Running in Lambda Emulators where X-Ray Daemon will not be listening
+        2. Running in Lambda Emulators, or locally where X-Ray Daemon will not be listening
         3. 
Explicitly disabled via constructor e.g `Tracer(disabled=True)` Returns @@ -377,7 +369,7 @@ def __is_trace_disabled(self) -> bool: bool """ logger.debug("Verifying whether Tracing has been disabled") - is_lambda_emulator = os.getenv("AWS_SAM_LOCAL") + is_lambda_sam_cli = os.getenv("AWS_SAM_LOCAL") env_option = str(os.getenv("POWERTOOLS_TRACE_DISABLED", "false")) disabled_env = strtobool(env_option) @@ -385,9 +377,9 @@ def __is_trace_disabled(self) -> bool: logger.debug("Tracing has been disabled via env var POWERTOOLS_TRACE_DISABLED") return disabled_env - if is_lambda_emulator: - logger.debug("Running under SAM CLI env; Tracing has been disabled") - return is_lambda_emulator + if is_lambda_sam_cli: + logger.debug("Running under SAM CLI env or not in Lambda env; disabling Tracing") + return True return False diff --git a/python/pyproject.toml b/python/pyproject.toml index 3360e7fd7ed..8b21c9dce86 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "aws_lambda_powertools" -version = "0.7.0" +version = "0.8.0" description = "Python utilities for AWS Lambda functions including but not limited to tracing, logging and custom metric" authors = ["Amazon Web Services"] classifiers=[ diff --git a/python/tests/functional/test_aws_lambda_logging.py b/python/tests/functional/test_aws_lambda_logging.py index 6d5efd492a7..4a4513134dc 100644 --- a/python/tests/functional/test_aws_lambda_logging.py +++ b/python/tests/functional/test_aws_lambda_logging.py @@ -3,51 +3,60 @@ import json import logging -from pytest import fixture, mark, yield_fixture +import pytest -from aws_lambda_powertools.logging.aws_lambda_logging import setup +from aws_lambda_powertools.logging.logger import Logger -@fixture +@pytest.fixture def stdout(): return io.StringIO() -@fixture +@pytest.fixture def handler(stdout): return logging.StreamHandler(stdout) -@fixture +@pytest.fixture def logger(): return logging.getLogger(__name__) -@yield_fixture +@pytest.fixture def root_logger(handler): logging.root.addHandler(handler) yield logging.root logging.root.removeHandler(handler) -@mark.parametrize("level", ["DEBUG", "WARNING", "ERROR", "INFO", "CRITICAL"]) -def test_setup_with_valid_log_levels(root_logger, logger, stdout, level): - setup(level, request_id="request id!", another="value") +@pytest.mark.parametrize("level", ["DEBUG", "WARNING", "ERROR", "INFO", "CRITICAL"]) +def test_setup_with_valid_log_levels(root_logger, stdout, level): + logger = Logger(level=level, stream=stdout, request_id="request id!", another="value") + msg = "This is a test" + log_command = { + "INFO": logger.info, + "ERROR": logger.error, + "WARNING": logger.warning, + "DEBUG": logger.debug, + "CRITICAL": logger.critical, + } - logger.critical("This is a test") + log_message = log_command[level] + log_message(msg) - log_dict = json.loads(stdout.getvalue()) + log_dict = json.loads(stdout.getvalue().strip()) check_log_dict(log_dict) - assert "CRITICAL" == log_dict["level"] + assert level == log_dict["level"] assert "This is a test" == log_dict["message"] assert "request id!" 
== log_dict["request_id"] assert "exception" not in log_dict -def test_logging_exception_traceback(root_logger, logger, stdout): - setup("DEBUG", request_id="request id!", another="value") +def test_logging_exception_traceback(root_logger, stdout): + logger = Logger(level="DEBUG", stream=stdout, request_id="request id!", another="value") try: raise Exception("Boom") @@ -61,11 +70,9 @@ def test_logging_exception_traceback(root_logger, logger, stdout): def test_setup_with_invalid_log_level(root_logger, logger, stdout): - setup("not a valid log level") # writes a log event - - log_dict = json.loads(stdout.getvalue()) - - check_log_dict(log_dict) + with pytest.raises(ValueError) as e: + Logger(level="not a valid log level") + assert "Unknown level" in e.value.args[0] def check_log_dict(log_dict): @@ -76,11 +83,11 @@ def check_log_dict(log_dict): def test_setup_with_bad_level_does_not_fail(): - setup("DBGG", request_id="request id!", another="value") + Logger("DBGG", request_id="request id!", another="value") -def test_with_dict_message(root_logger, logger, stdout): - setup("DEBUG", another="value") +def test_with_dict_message(root_logger, stdout): + logger = Logger(level="DEBUG", stream=stdout) msg = {"x": "isx"} logger.critical(msg) @@ -90,25 +97,25 @@ def test_with_dict_message(root_logger, logger, stdout): assert msg == log_dict["message"] -def test_with_json_message(root_logger, logger, stdout): - setup("DEBUG", another="value") +def test_with_json_message(root_logger, stdout): + logger = Logger(stream=stdout) msg = {"x": "isx"} - logger.critical(json.dumps(msg)) + logger.info(json.dumps(msg)) log_dict = json.loads(stdout.getvalue()) assert msg == log_dict["message"] -def test_with_unserialisable_value_in_message(root_logger, logger, stdout): - setup("DEBUG", another="value") +def test_with_unserialisable_value_in_message(root_logger, stdout): + logger = Logger(level="DEBUG", stream=stdout) class X: pass msg = {"x": X()} - logger.critical(msg) + logger.debug(msg) log_dict = json.loads(stdout.getvalue()) diff --git a/python/tests/functional/test_logger.py b/python/tests/functional/test_logger.py index db8a09e8d57..c1a882f3320 100644 --- a/python/tests/functional/test_logger.py +++ b/python/tests/functional/test_logger.py @@ -5,7 +5,10 @@ import pytest -from aws_lambda_powertools.logging import MetricUnit, log_metric, logger_inject_lambda_context, logger_setup +from aws_lambda_powertools.logging import Logger, MetricUnit, log_metric, logger_inject_lambda_context, logger_setup +from aws_lambda_powertools.logging.exceptions import InvalidLoggerSamplingRateError +from aws_lambda_powertools.logging.logger import JsonFormatter, set_package_logger +from aws_lambda_powertools.tracing import Tracer @pytest.fixture @@ -42,40 +45,40 @@ def test_setup_service_name(root_logger, stdout): # WHEN logger is setup # THEN service field should be equals service given service_name = "payment" - logger = logger_setup(service=service_name) + logger = Logger(service=service_name, stream=stdout) + logger.info("Hello") log = json.loads(stdout.getvalue()) assert service_name == log["service"] -def test_setup_no_service_name(root_logger, stdout): +def test_setup_no_service_name(stdout): # GIVEN no service is explicitly defined # WHEN logger is setup # THEN service field should be "service_undefined" - logger_setup() - logger = logger_setup() + logger = Logger(stream=stdout) logger.info("Hello") log = json.loads(stdout.getvalue()) assert "service_undefined" == log["service"] -def test_setup_service_env_var(monkeypatch, 
root_logger, stdout):
+def test_setup_service_env_var(monkeypatch, stdout):
     # GIVEN service is explicitly defined via POWERTOOLS_SERVICE_NAME env
     # WHEN logger is setup
     # THEN service field should be equals POWERTOOLS_SERVICE_NAME value
     service_name = "payment"
     monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", service_name)
 
-    logger = logger_setup()
+    logger = Logger(stream=stdout)
     logger.info("Hello")
 
     log = json.loads(stdout.getvalue())
 
     assert service_name == log["service"]
 
 
-def test_setup_sampling_rate(monkeypatch, root_logger, stdout):
+def test_setup_sampling_rate(monkeypatch, stdout):
     # GIVEN samping rate is explicitly defined via POWERTOOLS_LOGGER_SAMPLE_RATE env
     # WHEN logger is setup
     # THEN sampling rate should be equals POWERTOOLS_LOGGER_SAMPLE_RATE value and should sample debug logs
@@ -84,7 +87,7 @@ def test_setup_sampling_rate(monkeypatch, stdout):
     monkeypatch.setenv("POWERTOOLS_LOGGER_SAMPLE_RATE", sampling_rate)
     monkeypatch.setenv("LOG_LEVEL", "INFO")
 
-    logger = logger_setup()
+    logger = Logger(stream=stdout)
     logger.debug("I am being sampled")
 
     log = json.loads(stdout.getvalue())
@@ -93,7 +96,7 @@ def test_setup_sampling_rate(monkeypatch, stdout):
 
     assert "I am being sampled" == log["message"]
 
 
-def test_inject_lambda_context(root_logger, stdout, lambda_context):
+def test_inject_lambda_context(lambda_context, stdout):
     # GIVEN a lambda function is decorated with logger
     # WHEN logger is setup
     # THEN lambda contextual info should always be in the logs
@@ -104,9 +107,9 @@
         "function_request_id",
     )
 
-    logger = logger_setup()
+    logger = Logger(stream=stdout)
 
-    @logger_inject_lambda_context
+    @logger.inject_lambda_context
     def handler(event, context):
         logger.info("Hello")
 
@@ -118,15 +121,15 @@ def handler(event, context):
         assert key in log
 
 
-def test_inject_lambda_context_log_event_request(root_logger, stdout, lambda_context):
+def test_inject_lambda_context_log_event_request(lambda_context, stdout):
     # GIVEN a lambda function is decorated with logger instructed to log event
     # WHEN logger is setup
     # THEN logger should log event received from Lambda
     lambda_event = {"greeting": "hello"}
 
-    logger = logger_setup()
+    logger = Logger(stream=stdout)
 
-    @logger_inject_lambda_context(log_event=True)
+    @logger.inject_lambda_context(log_event=True)
     def handler(event, context):
         logger.info("Hello")
 
@@ -134,13 +138,12 @@ def handler(event, context):
 
     # Given that our string buffer has many log statements separated by newline \n
     # We need to clean it before we can assert on
-    stdout.seek(0)
-    logs = [json.loads(line.strip()) for line in stdout.readlines()]
+    logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
     logged_event, _ = logs
     assert "greeting" in logged_event["message"]
 
 
-def test_inject_lambda_context_log_event_request_env_var(monkeypatch, root_logger, stdout, lambda_context):
+def test_inject_lambda_context_log_event_request_env_var(monkeypatch, lambda_context, stdout):
     # GIVEN a lambda function is decorated with logger instructed to log event
     # via POWERTOOLS_LOGGER_LOG_EVENT env
     # WHEN logger is setup
@@ -148,9 +151,9 @@ def test_inject_lambda_context_log_event_request_env_var(monkeypatch, root_logge
     lambda_event = {"greeting": "hello"}
     monkeypatch.setenv("POWERTOOLS_LOGGER_LOG_EVENT", "true")
 
-    logger = logger_setup()
+    logger = Logger(stream=stdout)
 
-    @logger_inject_lambda_context
+    @logger.inject_lambda_context
     def 
handler(event, context): logger.info("Hello") @@ -158,8 +161,7 @@ def handler(event, context): # Given that our string buffer has many log statements separated by newline \n # We need to clean it before we can assert on - stdout.seek(0) - logs = [json.loads(line.strip()) for line in stdout.readlines()] + logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line] event = {} for log in logs: @@ -169,15 +171,15 @@ def handler(event, context): assert event == lambda_event -def test_inject_lambda_context_log_no_request_by_default(monkeypatch, root_logger, stdout, lambda_context): +def test_inject_lambda_context_log_no_request_by_default(monkeypatch, lambda_context, stdout): # GIVEN a lambda function is decorated with logger # WHEN logger is setup # THEN logger should not log event received by lambda handler lambda_event = {"greeting": "hello"} - logger = logger_setup() + logger = Logger(stream=stdout) - @logger_inject_lambda_context + @logger.inject_lambda_context def handler(event, context): logger.info("Hello") @@ -185,8 +187,7 @@ def handler(event, context): # Given that our string buffer has many log statements separated by newline \n # We need to clean it before we can assert on - stdout.seek(0) - logs = [json.loads(line.strip()) for line in stdout.readlines()] + logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line] event = {} for log in logs: @@ -196,7 +197,7 @@ def handler(event, context): assert event != lambda_event -def test_inject_lambda_cold_start(root_logger, stdout, lambda_context): +def test_inject_lambda_cold_start(lambda_context, stdout): # GIVEN a lambda function is decorated with logger, and called twice # WHEN logger is setup # THEN cold_start key should only be true in the first call @@ -208,12 +209,12 @@ def test_inject_lambda_cold_start(root_logger, stdout, lambda_context): # # since Lambda will only import our logger lib once per concurrent execution logger.is_cold_start = True - logger = logger_setup() + logger = Logger(stream=stdout) def custom_method(): logger.info("Hello from method") - @logger_inject_lambda_context + @logger.inject_lambda_context def handler(event, context): custom_method() logger.info("Hello") @@ -223,8 +224,7 @@ def handler(event, context): # Given that our string buffer has many log statements separated by newline \n # We need to clean it before we can assert on - stdout.seek(0) - logs = [json.loads(line.strip()) for line in stdout.readlines()] + logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line] first_log, second_log, third_log, fourth_log = logs # First execution @@ -298,15 +298,57 @@ def test_log_metric_partially_correct_args(capsys, invalid_input, expected): assert captured.out == expected +def test_package_logger(capsys): + + set_package_logger() + Tracer(disabled=True) + output = capsys.readouterr() + + assert "Tracing has been disabled" in output.out + + +def test_package_logger_format(stdout, capsys): + set_package_logger(stream=stdout, formatter=JsonFormatter(formatter="test")) + Tracer(disabled=True) + output = json.loads(stdout.getvalue().split("\n")[0]) + + assert "test" in output["formatter"] + + @pytest.mark.parametrize( "invalid_input,expected", [({"unit": "Blah"}, ValueError), ({"unit": None}, ValueError), ({}, TypeError)], ids=["invalid metric unit as str", "unit as None", "missing required unit"], ) -def test_log_metric_invalid_unit(invalid_input, expected): +def test_log_metric_invalid_unit(capsys, invalid_input, expected): # GIVEN invalid 
units are provided # WHEN log_metric is called # THEN ValueError exception should be raised with pytest.raises(expected): log_metric(name="test_metric", namespace="DemoApp", **invalid_input) + + +def test_logger_setup_deprecated(): + # Should be removed when GA + with pytest.raises(DeprecationWarning): + logger_setup() + + +def test_logger_inject_lambda_context_deprecated(): + # Should be removed when GA + with pytest.raises(DeprecationWarning): + logger_inject_lambda_context() + + +def test_logger_append_duplicated(stdout): + logger = Logger(stream=stdout, request_id="value") + logger.structure_logs(append=True, request_id="new_value") + logger.info("log") + log = json.loads(stdout.getvalue()) + assert "new_value" == log["request_id"] + + +def test_logger_invalid_sampling_rate(): + with pytest.raises(InvalidLoggerSamplingRateError): + Logger(sampling_rate="TEST") diff --git a/python/tests/functional/test_metrics.py b/python/tests/functional/test_metrics.py index 0feaf3303ff..703c8788ac1 100644 --- a/python/tests/functional/test_metrics.py +++ b/python/tests/functional/test_metrics.py @@ -57,15 +57,16 @@ def a_hundred_metrics() -> List[Dict[str, str]]: def serialize_metrics(metrics: List[Dict], dimensions: List[Dict], namespace: Dict) -> Dict: """ Helper function to build EMF object from a list of metrics, dimensions """ - my_metrics = MetricManager() - for metric in metrics: - my_metrics.add_metric(**metric) - + my_metrics = Metrics() for dimension in dimensions: my_metrics.add_dimension(**dimension) my_metrics.add_namespace(**namespace) - return my_metrics.serialize_metric_set() + for metric in metrics: + my_metrics.add_metric(**metric) + + if len(metrics) != 100: + return my_metrics.serialize_metric_set() def serialize_single_metric(metric: Dict, dimension: Dict, namespace: Dict) -> Dict: @@ -173,10 +174,10 @@ def test_namespace_env_var(monkeypatch, capsys, metric, dimension, namespace): assert expected["_aws"] == output["_aws"] -def test_metrics_spillover(capsys, metric, dimension, namespace, a_hundred_metrics): +def test_metrics_spillover(monkeypatch, capsys, metric, dimension, namespace, a_hundred_metrics): my_metrics = Metrics() - my_metrics.add_namespace(**namespace) my_metrics.add_dimension(**dimension) + my_metrics.add_namespace(**namespace) for _metric in a_hundred_metrics: my_metrics.add_metric(**_metric) @@ -194,9 +195,9 @@ def lambda_handler(evt, handler): single_metric = json.loads(single_metric) expected_single_metric = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) - expected_spillover_metrics = serialize_metrics( - metrics=a_hundred_metrics, dimensions=[dimension], namespace=namespace - ) + + serialize_metrics(metrics=a_hundred_metrics, dimensions=[dimension], namespace=namespace) + expected_spillover_metrics = json.loads(capsys.readouterr().out.strip()) remove_timestamp(metrics=[spillover_metrics, expected_spillover_metrics, single_metric, expected_single_metric]) @@ -264,3 +265,41 @@ def test_exceed_number_of_dimensions(metric, namespace): my_metric.add_namespace(**namespace) for dimension in dimensions: my_metric.add_dimension(**dimension) + + +def test_log_metrics_error_propagation(capsys, metric, dimension, namespace): + # GIVEN Metrics are serialized after handler execution + # WHEN If an error occurs and metrics have been added + # THEN we should log metrics and propagate exception up + my_metrics = Metrics() + + my_metrics.add_metric(**metric) + my_metrics.add_dimension(**dimension) + my_metrics.add_namespace(**namespace) + + 
@my_metrics.log_metrics + def lambda_handler(evt, context): + raise ValueError("Bubble up") + + with pytest.raises(ValueError): + lambda_handler({}, {}) + + output = json.loads(capsys.readouterr().out.strip()) + expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) + + remove_timestamp(metrics=[output, expected]) # Timestamp will always be different + assert expected["_aws"] == output["_aws"] + + +def test_log_no_metrics_error_propagation(capsys, metric, dimension, namespace): + # GIVEN Metrics are serialized after handler execution + # WHEN If an error occurs and no metrics have been added + # THEN we should propagate exception up and raise SchemaValidationError + my_metrics = Metrics() + + @my_metrics.log_metrics + def lambda_handler(evt, context): + raise ValueError("Bubble up") + + with pytest.raises(SchemaValidationError): + lambda_handler({}, {}) diff --git a/python/tests/functional/test_utils.py b/python/tests/functional/test_middleware_factory.py similarity index 82% rename from python/tests/functional/test_utils.py rename to python/tests/functional/test_middleware_factory.py index 141acf9d96f..ee8078f801a 100644 --- a/python/tests/functional/test_utils.py +++ b/python/tests/functional/test_middleware_factory.py @@ -4,6 +4,7 @@ import pytest from aws_lambda_powertools.middleware_factory import lambda_handler_decorator +from aws_lambda_powertools.middleware_factory.exceptions import MiddlewareInvalidArgumentError @pytest.fixture @@ -115,8 +116,26 @@ def log_event(handler, event, context, log_event=False): print(json.dumps(event)) return handler(event, context) - with pytest.raises(TypeError): + with pytest.raises(MiddlewareInvalidArgumentError): @log_event(True) def lambda_handler(evt, ctx): return True + + +def test_factory_middleware_exception_propagation(say_bye_middleware, say_hi_middleware): + class CustomMiddlewareException(Exception): + pass + + @lambda_handler_decorator + def raise_middleware(handler, evt, ctx): + raise CustomMiddlewareException("Raise middleware exception") + + @say_bye_middleware + @raise_middleware + @say_hi_middleware + def lambda_handler(evt, ctx): + return "hello world" + + with pytest.raises(CustomMiddlewareException): + lambda_handler({}, {}) diff --git a/python/tests/unit/test_tracing.py b/python/tests/unit/test_tracing.py index a7b98389e33..f8f43de0bf4 100644 --- a/python/tests/unit/test_tracing.py +++ b/python/tests/unit/test_tracing.py @@ -153,3 +153,31 @@ def test_tracer_no_autopatch(patch_mock): # THEN tracer should not patch any module Tracer(disabled=True, auto_patch=False) assert patch_mock.call_count == 0 + + +def test_tracer_lambda_handler_empty_response_metadata(mocker, xray_stub): + put_metadata_mock = mocker.MagicMock() + xray_provider = xray_stub(put_metadata_mock=put_metadata_mock) + tracer = Tracer(provider=xray_provider) + + @tracer.capture_lambda_handler + def handler(event, context): + return + + handler({}, mocker.MagicMock()) + + assert put_metadata_mock.call_count == 0 + + +def test_tracer_method_empty_response_metadata(mocker, xray_stub): + put_metadata_mock = mocker.MagicMock() + xray_provider = xray_stub(put_metadata_mock=put_metadata_mock) + tracer = Tracer(provider=xray_provider) + + @tracer.capture_method + def greeting(name, message): + return + + greeting(name="Foo", message="Bar") + + assert put_metadata_mock.call_count == 0
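Taken together, the new `Logger`, `structure_logs`, and `inject_lambda_context` pieces exercised by the tests above compose into a single handler. A condensed sketch, assuming the 0.8.0 API this diff introduces (the `payment` service name and `order_id` key are illustrative):

```python
from aws_lambda_powertools.logging import Logger

# POWERTOOLS_SERVICE_NAME and POWERTOOLS_LOGGER_SAMPLE_RATE env vars also apply
logger = Logger(service="payment")

@logger.inject_lambda_context  # adds cold_start, function name/memory/arn, request id
def handler(event, context):
    if "order_id" in event:
        # Appended keys persist across subsequent log statements
        logger.structure_logs(append=True, order_id=event["order_id"])
    logger.info("Collecting payment")
    return {"statusCode": 200}
```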