From c4135cb055fc715aff57f0d5f5c9131a2bd5ddaf Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 12 Jun 2020 18:05:40 +0100 Subject: [PATCH 1/9] chore: remove Logger deprecated code --- aws_lambda_powertools/logging/__init__.py | 4 +- aws_lambda_powertools/logging/logger.py | 149 +--------------------- tests/functional/test_logger.py | 89 ------------- 3 files changed, 3 insertions(+), 239 deletions(-) diff --git a/aws_lambda_powertools/logging/__init__.py b/aws_lambda_powertools/logging/__init__.py index 4c1bb2ec5c6..e3f8ff29fc5 100644 --- a/aws_lambda_powertools/logging/__init__.py +++ b/aws_lambda_powertools/logging/__init__.py @@ -1,6 +1,6 @@ """Logging utility """ from ..helper.models import MetricUnit -from .logger import Logger, log_metric, logger_inject_lambda_context, logger_setup +from .logger import Logger -__all__ = ["logger_setup", "logger_inject_lambda_context", "log_metric", "MetricUnit", "Logger"] +__all__ = ["MetricUnit", "Logger"] diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 63fa6ed8b28..d9cc53a35dd 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -1,16 +1,14 @@ import copy import functools -import itertools import json import logging import os import random import sys -import warnings from distutils.util import strtobool from typing import Any, Callable, Dict, Union -from ..helper.models import MetricUnit, build_lambda_context_model, build_metric_unit_from_str +from ..helper.models import build_lambda_context_model from .exceptions import InvalidLoggerSamplingRateError logger = logging.getLogger(__name__) @@ -114,44 +112,6 @@ def format(self, record): # noqa: A003 return json_record -def logger_setup( - service: str = None, level: str = None, sampling_rate: float = 0.0, legacy: bool = False, **kwargs -) -> DeprecationWarning: - """DEPRECATED - - This will be removed when GA - Use `aws_lambda_powertools.logging.logger.Logger` instead - - Example - ------- - **Logger class - Same UX** - - from aws_lambda_powertools import Logger - logger = Logger(service="payment") # same env var still applies - - """ - raise DeprecationWarning("Use Logger instead - This method will be removed when GA") - - -def logger_inject_lambda_context( - lambda_handler: Callable[[Dict, Any], Any] = None, log_event: bool = False -) -> DeprecationWarning: - """DEPRECATED - - This will be removed when GA - Use `aws_lambda_powertools.logging.logger.Logger` instead - - Example - ------- - **Logger class - Same UX** - - from aws_lambda_powertools import Logger - logger = Logger(service="payment") # same env var still applies - @logger.inject_lambda_context - def handler(evt, ctx): - pass - """ - raise DeprecationWarning("Use Logger instead - This method will be removed when GA") - - def _is_cold_start() -> bool: """Verifies whether is cold start @@ -170,113 +130,6 @@ def _is_cold_start() -> bool: return cold_start -def log_metric( - name: str, namespace: str, unit: MetricUnit, value: float = 0, service: str = "service_undefined", **dimensions, -): - """Logs a custom metric in a statsD-esque format to stdout. - - **This will be removed when GA - Use `aws_lambda_powertools.metrics.metrics.Metrics` instead** - - Creating Custom Metrics synchronously impact on performance/execution time. - Instead, log_metric prints a metric to CloudWatch Logs. - That allows us to pick them up asynchronously via another Lambda function and create them as a metric. 
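For comparison, a minimal sketch of the EMF-based replacement this deprecation points to (the Metrics utility shown in the later patches of this series; the service and namespace values are illustrative, reusing this docstring's own examples):

    from aws_lambda_powertools import Metrics
    from aws_lambda_powertools.metrics import MetricUnit

    metrics = Metrics(service="payment", namespace="DemoApp")

    @metrics.log_metrics  # serializes and flushes collected metrics after the handler returns
    def handler(event, context):
        metrics.add_metric(name="SuccessfulPayments", unit=MetricUnit.Count, value=1)

Unlike the MONITORING|value|unit|name|namespace|dimensions line printed by log_metric, this emits a CloudWatch Embedded Metric Format (EMF) object that CloudWatch converts into metrics asynchronously on its own, so no companion Lambda function is needed.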
- - NOTE: It takes up to 9 dimensions by default, and Metric units are conveniently available via MetricUnit Enum. - If service is not passed as arg or via env var, "service_undefined" will be used as dimension instead. - - **Output in CloudWatch Logs**: `MONITORING|||||` - - Serverless Application Repository App that creates custom metric from this log output: - https://serverlessrepo.aws.amazon.com/applications/arn:aws:serverlessrepo:us-east-1:374852340823:applications~async-custom-metrics - - Environment variables - --------------------- - POWERTOOLS_SERVICE_NAME: str - service name - - Parameters - ---------- - name : str - metric name, by default None - namespace : str - metric namespace (e.g. application name), by default None - unit : MetricUnit, by default MetricUnit.Count - metric unit enum value (e.g. MetricUnit.Seconds), by default None\n - API Info: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html - value : float, optional - metric value, by default 0 - service : str, optional - service name used as dimension, by default "service_undefined" - dimensions: dict, optional - keyword arguments as additional dimensions (e.g. `customer=customerId`) - - Example - ------- - **Log metric to count number of successful payments; define service via env var** - - $ export POWERTOOLS_SERVICE_NAME="payment" - from aws_lambda_powertools.logging import MetricUnit, log_metric - log_metric( - name="SuccessfulPayments", - unit=MetricUnit.Count, - value=1, - namespace="DemoApp" - ) - - **Log metric to count number of successful payments per campaign & customer** - - from aws_lambda_powertools.logging import MetricUnit, log_metric - log_metric( - name="SuccessfulPayments", - service="payment", - unit=MetricUnit.Count, - value=1, - namespace="DemoApp", - campaign=campaign_id, - customer=customer_id - ) - """ - - warnings.warn(message="This method will be removed in GA; use Metrics instead", category=DeprecationWarning) - logger.debug(f"Building new custom metric. Name: {name}, Unit: {unit}, Value: {value}, Dimensions: {dimensions}") - service = os.getenv("POWERTOOLS_SERVICE_NAME") or service - dimensions = __build_dimensions(**dimensions) - unit = build_metric_unit_from_str(unit) - - metric = f"MONITORING|{value}|{unit.name}|{name}|{namespace}|service={service}" - if dimensions: - metric = f"MONITORING|{value}|{unit.name}|{name}|{namespace}|service={service},{dimensions}" - - print(metric) - - -def __build_dimensions(**dimensions) -> str: - """Builds correct format for custom metric dimensions from kwargs - - Parameters - ---------- - dimensions: dict, optional - additional dimensions - - Returns - ------- - str - Dimensions in the form of "key=value,key2=value2" - """ - MAX_DIMENSIONS = 10 - dimension = "" - - # CloudWatch accepts a max of 10 dimensions per metric - # We include service name as a dimension - # so we take up to 9 values as additional dimensions - # before we convert everything to a string of key=value - dimensions_partition = dict(itertools.islice(dimensions.items(), MAX_DIMENSIONS)) - dimensions_list = [dimension + "=" + value for dimension, value in dimensions_partition.items() if value] - dimension = ",".join(dimensions_list) - - return dimension - - class Logger(logging.Logger): """Creates and setups a logger to format statements in JSON. 
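With the deprecated logger_setup and logger_inject_lambda_context helpers removed above, the Logger class is the single remaining entry point. A minimal usage sketch, mirroring the examples kept in the removed docstrings (the service name is illustrative):

    from aws_lambda_powertools import Logger

    logger = Logger(service="payment")  # the POWERTOOLS_SERVICE_NAME env var still applies

    @logger.inject_lambda_context  # enriches each JSON log record with Lambda context fields
    def handler(event, context):
        logger.info("Collecting payment")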
diff --git a/tests/functional/test_logger.py b/tests/functional/test_logger.py index 19c45a8587e..6b6a4bb6dde 100644 --- a/tests/functional/test_logger.py +++ b/tests/functional/test_logger.py @@ -6,7 +6,6 @@ import pytest from aws_lambda_powertools import Logger, Tracer -from aws_lambda_powertools.logging import MetricUnit, log_metric, logger_inject_lambda_context, logger_setup from aws_lambda_powertools.logging.exceptions import InvalidLoggerSamplingRateError from aws_lambda_powertools.logging.logger import JsonFormatter, set_package_logger @@ -236,68 +235,6 @@ def handler(event, context): assert fourth_log["cold_start"] is False -def test_log_metric(capsys): - # GIVEN a service, unit and value have been provided - # WHEN log_metric is called - # THEN custom metric line should be match given values - log_metric( - service="payment", name="test_metric", unit=MetricUnit.Seconds, value=60, namespace="DemoApp", - ) - expected = "MONITORING|60|Seconds|test_metric|DemoApp|service=payment\n" - captured = capsys.readouterr() - - assert captured.out == expected - - -def test_log_metric_env_var(monkeypatch, capsys): - # GIVEN a service, unit and value have been provided - # WHEN log_metric is called - # THEN custom metric line should be match given values - service_name = "payment" - monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", service_name) - - log_metric(name="test_metric", unit=MetricUnit.Seconds, value=60, namespace="DemoApp") - expected = "MONITORING|60|Seconds|test_metric|DemoApp|service=payment\n" - captured = capsys.readouterr() - - assert captured.out == expected - - -def test_log_metric_multiple_dimensions(capsys): - # GIVEN multiple optional dimensions are provided - # WHEN log_metric is called - # THEN dimensions should appear as dimenion=value - log_metric( - name="test_metric", unit=MetricUnit.Seconds, value=60, customer="abc", charge_id="123", namespace="DemoApp", - ) - expected = "MONITORING|60|Seconds|test_metric|DemoApp|service=service_undefined,customer=abc,charge_id=123\n" - captured = capsys.readouterr() - - assert captured.out == expected - - -@pytest.mark.parametrize( - "invalid_input,expected", - [ - ({"unit": "seconds"}, "MONITORING|0|Seconds|test_metric|DemoApp|service=service_undefined\n",), - ( - {"unit": "Seconds", "customer": None, "charge_id": "123", "payment_status": ""}, - "MONITORING|0|Seconds|test_metric|DemoApp|service=service_undefined,charge_id=123\n", - ), - ], - ids=["metric unit as string lower case", "empty dimension value"], -) -def test_log_metric_partially_correct_args(capsys, invalid_input, expected): - # GIVEN invalid arguments are provided such as empty dimension values and metric units in strings - # WHEN log_metric is called - # THEN default values should be used such as "Count" as a unit, invalid dimensions not included - # and no exception raised - log_metric(name="test_metric", namespace="DemoApp", **invalid_input) - captured = capsys.readouterr() - - assert captured.out == expected - - def test_package_logger(capsys): set_package_logger() @@ -315,32 +252,6 @@ def test_package_logger_format(stdout, capsys): assert "test" in output["formatter"] -@pytest.mark.parametrize( - "invalid_input,expected", - [({"unit": "Blah"}, ValueError), ({"unit": None}, ValueError), ({}, TypeError)], - ids=["invalid metric unit as str", "unit as None", "missing required unit"], -) -def test_log_metric_invalid_unit(capsys, invalid_input, expected): - # GIVEN invalid units are provided - # WHEN log_metric is called - # THEN ValueError exception should be raised - - 
with pytest.raises(expected): - log_metric(name="test_metric", namespace="DemoApp", **invalid_input) - - -def test_logger_setup_deprecated(): - # Should be removed when GA - with pytest.raises(DeprecationWarning): - logger_setup() - - -def test_logger_inject_lambda_context_deprecated(): - # Should be removed when GA - with pytest.raises(DeprecationWarning): - logger_inject_lambda_context() - - def test_logger_append_duplicated(stdout): logger = Logger(stream=stdout, request_id="value") logger.structure_logs(append=True, request_id="new_value") From d00b9260892f69b649e52fcd6f0e7f4ff64132bd Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Sat, 13 Jun 2020 18:06:32 +0100 Subject: [PATCH 2/9] chore: remove Metrics deprecated code --- aws_lambda_powertools/helper/models.py | 31 ---- aws_lambda_powertools/metrics/__init__.py | 3 +- aws_lambda_powertools/metrics/base.py | 29 +--- aws_lambda_powertools/metrics/exceptions.py | 6 - tests/functional/test_metrics.py | 155 ++++++-------------- 5 files changed, 44 insertions(+), 180 deletions(-) diff --git a/aws_lambda_powertools/helper/models.py b/aws_lambda_powertools/helper/models.py index bc6ea23eaa6..10dbfce37c9 100644 --- a/aws_lambda_powertools/helper/models.py +++ b/aws_lambda_powertools/helper/models.py @@ -4,7 +4,6 @@ """ from enum import Enum -from typing import Union class LambdaContextModel: @@ -100,33 +99,3 @@ class MetricUnit(Enum): GigabitsPerSecond = "Gigabits/Second" TerabitsPerSecond = "Terabits/Second" CountPerSecond = "Count/Second" - - -def build_metric_unit_from_str(unit: Union[str, MetricUnit]) -> MetricUnit: - """Builds correct metric unit value from string or return Count as default - - Parameters - ---------- - unit : str, MetricUnit - metric unit - - Returns - ------- - MetricUnit - Metric Unit enum from string value or MetricUnit.Count as a default - """ - if isinstance(unit, MetricUnit): - return unit - - if isinstance(unit, str): - unit = unit.lower().capitalize() - - metric_unit = None - - try: - metric_unit = MetricUnit[unit] - except (TypeError, KeyError): - metric_units = [units for units, _ in MetricUnit.__members__.items()] - raise ValueError(f"Invalid Metric Unit - Received {unit}. 
Value Metric Units are {metric_units}") - - return metric_unit diff --git a/aws_lambda_powertools/metrics/__init__.py b/aws_lambda_powertools/metrics/__init__.py index 2f71957437d..ebf2181c7bf 100644 --- a/aws_lambda_powertools/metrics/__init__.py +++ b/aws_lambda_powertools/metrics/__init__.py @@ -1,7 +1,7 @@ """CloudWatch Embedded Metric Format utility """ from ..helper.models import MetricUnit -from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError, UniqueNamespaceError +from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError from .metric import single_metric from .metrics import Metrics @@ -12,5 +12,4 @@ "MetricUnitError", "SchemaValidationError", "MetricValueError", - "UniqueNamespaceError", ] diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py index d6529cf71e2..284228e21e7 100644 --- a/aws_lambda_powertools/metrics/base.py +++ b/aws_lambda_powertools/metrics/base.py @@ -4,13 +4,12 @@ import numbers import os import pathlib -import warnings from typing import Dict, List, Union import fastjsonschema from ..helper.models import MetricUnit -from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError, UniqueNamespaceError +from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError logger = logging.getLogger(__name__) @@ -45,8 +44,6 @@ class MetricManager: When metric metric isn't supported by CloudWatch MetricValueError When metric value isn't a number - UniqueNamespaceError - When an additional namespace is set SchemaValidationError When metric object fails EMF schema validation """ @@ -61,30 +58,6 @@ def __init__( self._metric_units = [unit.value for unit in MetricUnit] self._metric_unit_options = list(MetricUnit.__members__) - def add_namespace(self, name: str): - """Adds given metric namespace - - Example - ------- - **Add metric namespace** - - metric.add_namespace(name="ServerlessAirline") - - Parameters - ---------- - name : str - Metric namespace - """ - warnings.warn( - "add_namespace method is deprecated. 
Pass namespace to Metrics constructor instead", DeprecationWarning - ) - if self.namespace is not None: - raise UniqueNamespaceError( - f"Namespace '{self.namespace}' already set - Only one namespace is allowed across metrics" - ) - logger.debug(f"Adding metrics namespace: {name}") - self.namespace = name - def add_metric(self, name: str, unit: MetricUnit, value: Union[float, int]): """Adds given metric diff --git a/aws_lambda_powertools/metrics/exceptions.py b/aws_lambda_powertools/metrics/exceptions.py index 88a38c24229..0376c55a40e 100644 --- a/aws_lambda_powertools/metrics/exceptions.py +++ b/aws_lambda_powertools/metrics/exceptions.py @@ -14,9 +14,3 @@ class MetricValueError(Exception): """When metric value isn't a valid number""" pass - - -class UniqueNamespaceError(Exception): - """When an additional namespace is set""" - - pass diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 3e2ebe34fe1..244a56119cd 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -6,13 +6,7 @@ import pytest from aws_lambda_powertools import Metrics, single_metric -from aws_lambda_powertools.metrics import ( - MetricUnit, - MetricUnitError, - MetricValueError, - SchemaValidationError, - UniqueNamespaceError, -) +from aws_lambda_powertools.metrics import MetricUnit, MetricUnitError, MetricValueError, SchemaValidationError from aws_lambda_powertools.metrics.base import MetricManager @@ -59,11 +53,11 @@ def non_str_dimensions() -> List[Dict[str, Any]]: @pytest.fixture def namespace() -> Dict[str, str]: - return {"name": "test_namespace"} + return "test_namespace" @pytest.fixture -def a_hundred_metrics() -> List[Dict[str, str]]: +def a_hundred_metrics(namespace=namespace) -> List[Dict[str, str]]: metrics = [] for i in range(100): metrics.append({"name": f"metric_{i}", "unit": "Count", "value": 1}) @@ -71,13 +65,12 @@ def a_hundred_metrics() -> List[Dict[str, str]]: return metrics -def serialize_metrics(metrics: List[Dict], dimensions: List[Dict], namespace: Dict) -> Dict: +def serialize_metrics(metrics: List[Dict], dimensions: List[Dict], namespace: str) -> Dict: """ Helper function to build EMF object from a list of metrics, dimensions """ - my_metrics = MetricManager() + my_metrics = MetricManager(namespace=namespace) for dimension in dimensions: my_metrics.add_dimension(**dimension) - my_metrics.add_namespace(**namespace) for metric in metrics: my_metrics.add_metric(**metric) @@ -85,12 +78,11 @@ def serialize_metrics(metrics: List[Dict], dimensions: List[Dict], namespace: Di return my_metrics.serialize_metric_set() -def serialize_single_metric(metric: Dict, dimension: Dict, namespace: Dict) -> Dict: +def serialize_single_metric(metric: Dict, dimension: Dict, namespace: str) -> Dict: """ Helper function to build EMF object from a given metric, dimension and namespace """ - my_metrics = MetricManager() + my_metrics = MetricManager(namespace=namespace) my_metrics.add_metric(**metric) my_metrics.add_dimension(**dimension) - my_metrics.add_namespace(**namespace) return my_metrics.serialize_metric_set() @@ -103,11 +95,10 @@ def remove_timestamp(metrics: List): def test_single_metric_one_metric_only(capsys, metric, dimension, namespace): # GIVEN we attempt to add more than one metric # WHEN using single_metric context manager - with single_metric(**metric) as my_metric: + with single_metric(namespace=namespace, **metric) as my_metric: my_metric.add_metric(name="second_metric", unit="Count", value=1) my_metric.add_metric(name="third_metric", 
unit="Seconds", value=1) my_metric.add_dimension(**dimension) - my_metric.add_namespace(**namespace) output = json.loads(capsys.readouterr().out.strip()) expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) @@ -118,25 +109,9 @@ def test_single_metric_one_metric_only(capsys, metric, dimension, namespace): assert expected["_aws"] == output["_aws"] -def test_multiple_namespaces_exception(metric, dimension, namespace): - # GIVEN we attempt to add multiple namespaces - namespace_a = {"name": "OtherNamespace"} - namespace_b = {"name": "AnotherNamespace"} - - # WHEN an EMF object can only have one - # THEN we should raise UniqueNamespaceError exception - with pytest.raises(UniqueNamespaceError): - with single_metric(**metric) as my_metric: - my_metric.add_dimension(**dimension) - my_metric.add_namespace(**namespace) - my_metric.add_namespace(**namespace_a) - my_metric.add_namespace(**namespace_b) - - def test_log_metrics(capsys, metrics, dimensions, namespace): # GIVEN Metrics is initialized - my_metrics = Metrics() - my_metrics.add_namespace(**namespace) + my_metrics = Metrics(namespace=namespace) for metric in metrics: my_metrics.add_metric(**metric) for dimension in dimensions: @@ -164,7 +139,7 @@ def lambda_handler(evt, ctx): def test_namespace_env_var(monkeypatch, capsys, metric, dimension, namespace): # GIVEN we use POWERTOOLS_METRICS_NAMESPACE - monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace["name"]) + monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace) # WHEN creating a metric but don't explicitly # add a namespace @@ -185,7 +160,7 @@ def test_namespace_env_var(monkeypatch, capsys, metric, dimension, namespace): def test_service_env_var(monkeypatch, capsys, metric, namespace): # GIVEN we use POWERTOOLS_SERVICE_NAME monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", "test_service") - my_metrics = Metrics(namespace=namespace["name"]) + my_metrics = Metrics(namespace=namespace) # WHEN creating a metric but don't explicitly # add a dimension @@ -210,9 +185,8 @@ def lambda_handler(evt, context): def test_metrics_spillover(monkeypatch, capsys, metric, dimension, namespace, a_hundred_metrics): # GIVEN Metrics is initialized and we have over a hundred metrics to add - my_metrics = Metrics() + my_metrics = Metrics(namespace=namespace) my_metrics.add_dimension(**dimension) - my_metrics.add_namespace(**namespace) # WHEN we add more than 100 metrics for _metric in a_hundred_metrics: @@ -242,12 +216,11 @@ def test_metrics_spillover(monkeypatch, capsys, metric, dimension, namespace, a_ def test_log_metrics_should_invoke_function(metric, dimension, namespace): # GIVEN Metrics is initialized - my_metrics = Metrics() + my_metrics = Metrics(namespace=namespace) # WHEN log_metrics is used to serialize metrics @my_metrics.log_metrics def lambda_handler(evt, context): - my_metrics.add_namespace(**namespace) my_metrics.add_metric(**metric) my_metrics.add_dimension(**dimension) return True @@ -266,7 +239,6 @@ def test_incorrect_metric_unit(metric, dimension, namespace): with pytest.raises(MetricUnitError): with single_metric(**metric) as my_metric: my_metric.add_dimension(**dimension) - my_metric.add_namespace(**namespace) def test_schema_no_namespace(metric, dimension): @@ -289,13 +261,11 @@ def test_schema_incorrect_value(metric, dimension, namespace): with pytest.raises(MetricValueError): with single_metric(**metric) as my_metric: my_metric.add_dimension(**dimension) - my_metric.add_namespace(**namespace) def test_schema_no_metrics(dimensions, 
namespace): # GIVEN Metrics is initialized - my_metrics = Metrics() - my_metrics.add_namespace(**namespace) + my_metrics = Metrics(namespace=namespace) # WHEN no metrics have been added # but a namespace and dimensions only @@ -317,18 +287,16 @@ def test_exceed_number_of_dimensions(metric, namespace): # THEN it should fail validation and raise SchemaValidationError with pytest.raises(SchemaValidationError): with single_metric(**metric) as my_metric: - my_metric.add_namespace(**namespace) for dimension in dimensions: my_metric.add_dimension(**dimension) def test_log_metrics_during_exception(capsys, metric, dimension, namespace): # GIVEN Metrics is initialized - my_metrics = Metrics() + my_metrics = Metrics(namespace=namespace) my_metrics.add_metric(**metric) my_metrics.add_dimension(**dimension) - my_metrics.add_namespace(**namespace) # WHEN log_metrics is used to serialize metrics # but an error has been raised during handler execution @@ -347,18 +315,18 @@ def lambda_handler(evt, context): assert expected["_aws"] == output["_aws"] -def test_log_no_metrics_error_propagation(capsys, metric, dimension, namespace): +def test_log_metrics_raise_on_empty_metrics(capsys, metric, dimension, namespace): # GIVEN Metrics is initialized - my_metrics = Metrics() + my_metrics = Metrics(service="test_service", namespace=namespace) @my_metrics.log_metrics(raise_on_empty_metrics=True) def lambda_handler(evt, context): # WHEN log_metrics is used with raise_on_empty_metrics param and has no metrics - # and the function decorated also raised an exception - raise ValueError("Bubble up") + return True - # THEN the raised exception should be - with pytest.raises(SchemaValidationError): + # THEN the raised exception should be SchemaValidationError + # and specifically about the lack of Metrics + with pytest.raises(SchemaValidationError, match="_aws\.CloudWatchMetrics\[0\]\.Metrics"): # noqa: W605 lambda_handler({}, {}) @@ -370,9 +338,8 @@ def test_all_possible_metric_units(metric, dimension, namespace): metric["unit"] = unit.name # WHEN we iterate over all available metric unit keys from MetricUnit enum # THEN we raise no MetricUnitError nor SchemaValidationError - with single_metric(**metric) as my_metric: + with single_metric(namespace=namespace, **metric) as my_metric: my_metric.add_dimension(**dimension) - my_metric.add_namespace(**namespace) # WHEN we iterate over all available metric unit keys from MetricUnit enum all_metric_units = [unit.value for unit in MetricUnit] @@ -381,18 +348,17 @@ def test_all_possible_metric_units(metric, dimension, namespace): for unit in all_metric_units: metric["unit"] = unit # THEN we raise no MetricUnitError nor SchemaValidationError - with single_metric(**metric) as my_metric: + with single_metric(namespace=namespace, **metric) as my_metric: my_metric.add_dimension(**dimension) - my_metric.add_namespace(**namespace) def test_metrics_reuse_metric_set(metric, dimension, namespace): # GIVEN Metrics is initialized - my_metrics = Metrics() + my_metrics = Metrics(namespace=namespace) my_metrics.add_metric(**metric) # WHEN Metrics is initialized one more time - my_metrics_2 = Metrics() + my_metrics_2 = Metrics(namespace=namespace) # THEN Both class instances should have the same metric set assert my_metrics_2.metric_set == my_metrics.metric_set @@ -400,11 +366,10 @@ def test_metrics_reuse_metric_set(metric, dimension, namespace): def test_log_metrics_clear_metrics_after_invocation(metric, dimension, namespace): # GIVEN Metrics is initialized - my_metrics = Metrics() + my_metrics = 
Metrics(namespace=namespace) my_metrics.add_metric(**metric) my_metrics.add_dimension(**dimension) - my_metrics.add_namespace(**namespace) # WHEN log_metrics is used to flush metrics from memory @my_metrics.log_metrics @@ -419,8 +384,7 @@ def lambda_handler(evt, context): def test_log_metrics_non_string_dimension_values(capsys, metrics, non_str_dimensions, namespace): # GIVEN Metrics is initialized and dimensions with non-string values are added - my_metrics = Metrics() - my_metrics.add_namespace(**namespace) + my_metrics = Metrics(namespace=namespace) for metric in metrics: my_metrics.add_metric(**metric) for dimension in non_str_dimensions: @@ -441,16 +405,9 @@ def lambda_handler(evt, ctx): assert isinstance(output[dimension["name"]], str) -def test_add_namespace_warns_for_deprecation(capsys, metrics, dimensions, namespace): - # GIVEN Metrics is initialized - my_metrics = Metrics() - with pytest.deprecated_call(): - my_metrics.add_namespace(**namespace) - - def test_log_metrics_with_explicit_namespace(capsys, metrics, dimensions, namespace): # GIVEN Metrics is initialized with service specified - my_metrics = Metrics(service="test_service", namespace=namespace["name"]) + my_metrics = Metrics(service="test_service", namespace=namespace) for metric in metrics: my_metrics.add_metric(**metric) for dimension in dimensions: @@ -476,9 +433,9 @@ def lambda_handler(evt, ctx): assert expected == output -def test_log_metrics_with_implicit_dimensions(capsys, metrics): +def test_log_metrics_with_implicit_dimensions(capsys, metrics, namespace): # GIVEN Metrics is initialized with service specified - my_metrics = Metrics(service="test_service", namespace="test_application") + my_metrics = Metrics(service="test_service", namespace=namespace) for metric in metrics: my_metrics.add_metric(**metric) @@ -492,9 +449,7 @@ def lambda_handler(evt, ctx): output = json.loads(capsys.readouterr().out.strip()) expected_dimensions = [{"name": "service", "value": "test_service"}] - expected = serialize_metrics( - metrics=metrics, dimensions=expected_dimensions, namespace={"name": "test_application"} - ) + expected = serialize_metrics(metrics=metrics, dimensions=expected_dimensions, namespace=namespace) remove_timestamp(metrics=[output, expected]) # Timestamp will always be different @@ -530,37 +485,15 @@ def lambda_handler(evt, ctx): assert second_output["service"] == "another_test_service" -def test_log_metrics_with_namespace_overridden(capsys, metrics, dimensions): - # GIVEN Metrics is initialized with namespace specified - my_metrics = Metrics(namespace="test_service") - for metric in metrics: - my_metrics.add_metric(**metric) - for dimension in dimensions: - my_metrics.add_dimension(**dimension) - - # WHEN we try to call add_namespace - # THEN we should raise UniqueNamespaceError exception - @my_metrics.log_metrics - def lambda_handler(evt, ctx): - my_metrics.add_namespace(name="new_namespace") - return True - - with pytest.raises(UniqueNamespaceError): - lambda_handler({}, {}) - - with pytest.raises(UniqueNamespaceError): - my_metrics.add_namespace(name="another_new_namespace") - - -def test_single_metric_with_service(capsys, metric, dimension): +def test_single_metric_with_service(capsys, metric, dimension, namespace): # GIVEN we pass namespace parameter to single_metric # WHEN creating a metric - with single_metric(**metric, namespace="test_service") as my_metrics: + with single_metric(**metric, namespace=namespace) as my_metrics: my_metrics.add_dimension(**dimension) output = 
json.loads(capsys.readouterr().out.strip()) - expected = serialize_single_metric(metric=metric, dimension=dimension, namespace={"name": "test_service"}) + expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) remove_timestamp(metrics=[output, expected]) # Timestamp will always be different @@ -570,10 +503,10 @@ def test_single_metric_with_service(capsys, metric, dimension): def test_namespace_var_precedence(monkeypatch, capsys, metric, dimension, namespace): # GIVEN we use POWERTOOLS_METRICS_NAMESPACE - monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace["name"]) + monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace) # WHEN creating a metric and explicitly set a namespace - with single_metric(**metric, namespace=namespace["name"]) as my_metrics: + with single_metric(namespace=namespace, **metric) as my_metrics: my_metrics.add_dimension(**dimension) monkeypatch.delenv("POWERTOOLS_METRICS_NAMESPACE") @@ -588,8 +521,7 @@ def test_namespace_var_precedence(monkeypatch, capsys, metric, dimension, namesp def test_emit_cold_start_metric(capsys, namespace): # GIVEN Metrics is initialized - my_metrics = Metrics() - my_metrics.add_namespace(**namespace) + my_metrics = Metrics(service="test_service", namespace=namespace) # WHEN log_metrics is used with capture_cold_start_metric @my_metrics.log_metrics(capture_cold_start_metric=True) @@ -608,8 +540,7 @@ def lambda_handler(evt, context): def test_emit_cold_start_metric_only_once(capsys, namespace, dimension, metric): # GIVEN Metrics is initialized - my_metrics = Metrics() - my_metrics.add_namespace(**namespace) + my_metrics = Metrics(namespace=namespace) # WHEN log_metrics is used with capture_cold_start_metric # and handler is called more than once @@ -635,7 +566,7 @@ def lambda_handler(evt, context): def test_log_metrics_decorator_no_metrics(dimensions, namespace): # GIVEN Metrics is initialized - my_metrics = Metrics(namespace=namespace["name"], service="test_service") + my_metrics = Metrics(namespace=namespace, service="test_service") # WHEN using the log_metrics decorator and no metrics have been added @my_metrics.log_metrics @@ -649,9 +580,9 @@ def lambda_handler(evt, context): assert str(w[-1].message) == "No metrics to publish, skipping" -def test_log_metrics_with_implicit_dimensions_called_twice(capsys, metrics): +def test_log_metrics_with_implicit_dimensions_called_twice(capsys, metrics, namespace): # GIVEN Metrics is initialized with service specified - my_metrics = Metrics(service="test_service", namespace="test_application") + my_metrics = Metrics(service="test_service", namespace=namespace) # WHEN we utilize log_metrics to serialize and don't explicitly add any dimensions, # and the lambda function is called more than once @@ -668,9 +599,7 @@ def lambda_handler(evt, ctx): second_output = json.loads(capsys.readouterr().out.strip()) expected_dimensions = [{"name": "service", "value": "test_service"}] - expected = serialize_metrics( - metrics=metrics, dimensions=expected_dimensions, namespace={"name": "test_application"} - ) + expected = serialize_metrics(metrics=metrics, dimensions=expected_dimensions, namespace=namespace) remove_timestamp(metrics=[output, expected, second_output]) # Timestamp will always be different From 6af9f9198a623a809f40d77ffdd3d7dadc2da1f4 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Sat, 13 Jun 2020 18:26:53 +0100 Subject: [PATCH 3/9] chore: remove models from deprecated code --- aws_lambda_powertools/helper/__init__.py | 2 - 
aws_lambda_powertools/logging/__init__.py | 3 +- .../models.py => logging/lambda_context.py} | 52 ++----------------- aws_lambda_powertools/logging/logger.py | 2 +- aws_lambda_powertools/metrics/__init__.py | 2 +- aws_lambda_powertools/metrics/base.py | 31 ++++++++++- aws_lambda_powertools/metrics/metric.py | 3 +- 7 files changed, 37 insertions(+), 58 deletions(-) delete mode 100644 aws_lambda_powertools/helper/__init__.py rename aws_lambda_powertools/{helper/models.py => logging/lambda_context.py} (50%) diff --git a/aws_lambda_powertools/helper/__init__.py b/aws_lambda_powertools/helper/__init__.py deleted file mode 100644 index eb6356d8ec9..00000000000 --- a/aws_lambda_powertools/helper/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""Collection of reusable code shared across powertools utilities -""" diff --git a/aws_lambda_powertools/logging/__init__.py b/aws_lambda_powertools/logging/__init__.py index e3f8ff29fc5..0456b202ffa 100644 --- a/aws_lambda_powertools/logging/__init__.py +++ b/aws_lambda_powertools/logging/__init__.py @@ -1,6 +1,5 @@ """Logging utility """ -from ..helper.models import MetricUnit from .logger import Logger -__all__ = ["MetricUnit", "Logger"] +__all__ = ["Logger"] diff --git a/aws_lambda_powertools/helper/models.py b/aws_lambda_powertools/logging/lambda_context.py similarity index 50% rename from aws_lambda_powertools/helper/models.py rename to aws_lambda_powertools/logging/lambda_context.py index 10dbfce37c9..75da8711f03 100644 --- a/aws_lambda_powertools/helper/models.py +++ b/aws_lambda_powertools/logging/lambda_context.py @@ -1,32 +1,15 @@ -"""Collection of classes as models and builder functions -that provide classes as data representation for -key data used in more than one place. -""" - -from enum import Enum - - class LambdaContextModel: """A handful of Lambda Runtime Context fields Full Lambda Context object: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html - NOTE - ---- - - Originally, memory_size is `int` but we cast to `str` in this model - due to aws_lambda_logging library use of `%` during formatting - Ref: https://gitlab.com/hadrien/aws_lambda_logging/blob/master/aws_lambda_logging.py#L47 - Parameters ---------- function_name: str Lambda function name, by default "UNDEFINED" e.g. "test" - function_memory_size: str - Lambda function memory in MB, by default "UNDEFINED" - e.g. "128" - casting from int to str due to aws_lambda_logging using `%` when enumerating fields + function_memory_size: int + Lambda function memory in MB, by default 128 function_arn: str Lambda function ARN, by default "UNDEFINED" e.g. 
"arn:aws:lambda:eu-west-1:809313241:function:test" @@ -38,7 +21,7 @@ class LambdaContextModel: def __init__( self, function_name: str = "UNDEFINED", - function_memory_size: str = "UNDEFINED", + function_memory_size: int = 128, function_arn: str = "UNDEFINED", function_request_id: str = "UNDEFINED", ): @@ -70,32 +53,3 @@ def build_lambda_context_model(context: object) -> LambdaContextModel: } return LambdaContextModel(**context) - - -class MetricUnit(Enum): - Seconds = "Seconds" - Microseconds = "Microseconds" - Milliseconds = "Milliseconds" - Bytes = "Bytes" - Kilobytes = "Kilobytes" - Megabytes = "Megabytes" - Gigabytes = "Gigabytes" - Terabytes = "Terabytes" - Bits = "Bits" - Kilobits = "Kilobits" - Megabits = "Megabits" - Gigabits = "Gigabits" - Terabits = "Terabits" - Percent = "Percent" - Count = "Count" - BytesPerSecond = "Bytes/Second" - KilobytesPerSecond = "Kilobytes/Second" - MegabytesPerSecond = "Megabytes/Second" - GigabytesPerSecond = "Gigabytes/Second" - TerabytesPerSecond = "Terabytes/Second" - BitsPerSecond = "Bits/Second" - KilobitsPerSecond = "Kilobits/Second" - MegabitsPerSecond = "Megabits/Second" - GigabitsPerSecond = "Gigabits/Second" - TerabitsPerSecond = "Terabits/Second" - CountPerSecond = "Count/Second" diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index d9cc53a35dd..f61cf162405 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -8,8 +8,8 @@ from distutils.util import strtobool from typing import Any, Callable, Dict, Union -from ..helper.models import build_lambda_context_model from .exceptions import InvalidLoggerSamplingRateError +from .lambda_context import build_lambda_context_model logger = logging.getLogger(__name__) diff --git a/aws_lambda_powertools/metrics/__init__.py b/aws_lambda_powertools/metrics/__init__.py index ebf2181c7bf..7379dad8b88 100644 --- a/aws_lambda_powertools/metrics/__init__.py +++ b/aws_lambda_powertools/metrics/__init__.py @@ -1,6 +1,6 @@ """CloudWatch Embedded Metric Format utility """ -from ..helper.models import MetricUnit +from .base import MetricUnit from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError from .metric import single_metric from .metrics import Metrics diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py index 284228e21e7..1eece781bbf 100644 --- a/aws_lambda_powertools/metrics/base.py +++ b/aws_lambda_powertools/metrics/base.py @@ -4,11 +4,11 @@ import numbers import os import pathlib +from enum import Enum from typing import Dict, List, Union import fastjsonschema -from ..helper.models import MetricUnit from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError logger = logging.getLogger(__name__) @@ -20,6 +20,35 @@ MAX_METRICS = 100 +class MetricUnit(Enum): + Seconds = "Seconds" + Microseconds = "Microseconds" + Milliseconds = "Milliseconds" + Bytes = "Bytes" + Kilobytes = "Kilobytes" + Megabytes = "Megabytes" + Gigabytes = "Gigabytes" + Terabytes = "Terabytes" + Bits = "Bits" + Kilobits = "Kilobits" + Megabits = "Megabits" + Gigabits = "Gigabits" + Terabits = "Terabits" + Percent = "Percent" + Count = "Count" + BytesPerSecond = "Bytes/Second" + KilobytesPerSecond = "Kilobytes/Second" + MegabytesPerSecond = "Megabytes/Second" + GigabytesPerSecond = "Gigabytes/Second" + TerabytesPerSecond = "Terabytes/Second" + BitsPerSecond = "Bits/Second" + KilobitsPerSecond = "Kilobits/Second" + MegabitsPerSecond = "Megabits/Second" + 
GigabitsPerSecond = "Gigabits/Second" + TerabitsPerSecond = "Terabits/Second" + CountPerSecond = "Count/Second" + + class MetricManager: """Base class for metric functionality (namespace, metric, dimension, serialization) diff --git a/aws_lambda_powertools/metrics/metric.py b/aws_lambda_powertools/metrics/metric.py index 53434b4a9d4..1293139afbe 100644 --- a/aws_lambda_powertools/metrics/metric.py +++ b/aws_lambda_powertools/metrics/metric.py @@ -3,8 +3,7 @@ from contextlib import contextmanager from typing import Dict -from ..helper.models import MetricUnit -from .base import MetricManager +from .base import MetricManager, MetricUnit logger = logging.getLogger(__name__) From acfd083d5b584372f601607f7490186f04a080b7 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Sat, 13 Jun 2020 18:36:24 +0100 Subject: [PATCH 4/9] chore: move logger formatter to its own file --- aws_lambda_powertools/logging/formatter.py | 99 ++++++++++++++++++++++ aws_lambda_powertools/logging/logger.py | 98 +-------------------- 2 files changed, 100 insertions(+), 97 deletions(-) create mode 100644 aws_lambda_powertools/logging/formatter.py diff --git a/aws_lambda_powertools/logging/formatter.py b/aws_lambda_powertools/logging/formatter.py new file mode 100644 index 00000000000..8aa07069f97 --- /dev/null +++ b/aws_lambda_powertools/logging/formatter.py @@ -0,0 +1,99 @@ +import json +import logging +from typing import Any + + +def json_formatter(unserialized_value: Any): + """JSON custom serializer to cast unserialisable values to strings. + + Example + ------- + + **Serialize unserialisable value to string** + + class X: pass + value = {"x": X()} + + json.dumps(value, default=json_formatter) + + Parameters + ---------- + unserialized_value: Any + Python object unserializable by JSON + """ + return str(unserialized_value) + + +class JsonFormatter(logging.Formatter): + """AWS Lambda Logging formatter. + + Formats the log message as a JSON encoded string. If the message is a + dict it will be used directly. If the message can be parsed as JSON, then + the parse d value is used in the output record. + + Originally taken from https://gitlab.com/hadrien/aws_lambda_logging/ + + """ + + def __init__(self, **kwargs): + """Return a JsonFormatter instance. + + The `json_default` kwarg is used to specify a formatter for otherwise + unserialisable values. It must not throw. Defaults to a function that + coerces the value to a string. + + Other kwargs are used to specify log field format strings. + """ + datefmt = kwargs.pop("datefmt", None) + + super(JsonFormatter, self).__init__(datefmt=datefmt) + self.reserved_keys = ["timestamp", "level", "location"] + self.format_dict = { + "timestamp": "%(asctime)s", + "level": "%(levelname)s", + "location": "%(funcName)s:%(lineno)d", + } + self.format_dict.update(kwargs) + self.default_json_formatter = kwargs.pop("json_default", json_formatter) + + def format(self, record): # noqa: A003 + record_dict = record.__dict__.copy() + record_dict["asctime"] = self.formatTime(record, self.datefmt) + + log_dict = {} + for key, value in self.format_dict.items(): + if value and key in self.reserved_keys: + # converts default logging expr to its record value + # e.g. 
'%(asctime)s' to '2020-04-24 09:35:40,698' + log_dict[key] = value % record_dict + else: + log_dict[key] = value + + if isinstance(record_dict["msg"], dict): + log_dict["message"] = record_dict["msg"] + else: + log_dict["message"] = record.getMessage() + + # Attempt to decode the message as JSON, if so, merge it with the + # overall message for clarity. + try: + log_dict["message"] = json.loads(log_dict["message"]) + except (json.decoder.JSONDecodeError, TypeError, ValueError): + pass + + if record.exc_info: + # Cache the traceback text to avoid converting it multiple times + # (it's constant anyway) + # from logging.Formatter:format + if not record.exc_text: + record.exc_text = self.formatException(record.exc_info) + + if record.exc_text: + log_dict["exception"] = record.exc_text + + json_record = json.dumps(log_dict, default=self.default_json_formatter) + + if hasattr(json_record, "decode"): # pragma: no cover + json_record = json_record.decode("utf-8") + + return json_record diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index f61cf162405..9a943536b4e 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -1,6 +1,5 @@ import copy import functools -import json import logging import os import random @@ -9,6 +8,7 @@ from typing import Any, Callable, Dict, Union from .exceptions import InvalidLoggerSamplingRateError +from .formatter import JsonFormatter from .lambda_context import build_lambda_context_model logger = logging.getLogger(__name__) @@ -16,102 +16,6 @@ is_cold_start = True -def json_formatter(unserialized_value: Any): - """JSON custom serializer to cast unserialisable values to strings. - - Example - ------- - - **Serialize unserialisable value to string** - - class X: pass - value = {"x": X()} - - json.dumps(value, default=json_formatter) - - Parameters - ---------- - unserialized_value: Any - Python object unserializable by JSON - """ - return str(unserialized_value) - - -class JsonFormatter(logging.Formatter): - """AWS Lambda Logging formatter. - - Formats the log message as a JSON encoded string. If the message is a - dict it will be used directly. If the message can be parsed as JSON, then - the parse d value is used in the output record. - - Originally taken from https://gitlab.com/hadrien/aws_lambda_logging/ - - """ - - def __init__(self, **kwargs): - """Return a JsonFormatter instance. - - The `json_default` kwarg is used to specify a formatter for otherwise - unserialisable values. It must not throw. Defaults to a function that - coerces the value to a string. - - Other kwargs are used to specify log field format strings. - """ - datefmt = kwargs.pop("datefmt", None) - - super(JsonFormatter, self).__init__(datefmt=datefmt) - self.reserved_keys = ["timestamp", "level", "location"] - self.format_dict = { - "timestamp": "%(asctime)s", - "level": "%(levelname)s", - "location": "%(funcName)s:%(lineno)d", - } - self.format_dict.update(kwargs) - self.default_json_formatter = kwargs.pop("json_default", json_formatter) - - def format(self, record): # noqa: A003 - record_dict = record.__dict__.copy() - record_dict["asctime"] = self.formatTime(record, self.datefmt) - - log_dict = {} - for key, value in self.format_dict.items(): - if value and key in self.reserved_keys: - # converts default logging expr to its record value - # e.g. 
'%(asctime)s' to '2020-04-24 09:35:40,698' - log_dict[key] = value % record_dict - else: - log_dict[key] = value - - if isinstance(record_dict["msg"], dict): - log_dict["message"] = record_dict["msg"] - else: - log_dict["message"] = record.getMessage() - - # Attempt to decode the message as JSON, if so, merge it with the - # overall message for clarity. - try: - log_dict["message"] = json.loads(log_dict["message"]) - except (json.decoder.JSONDecodeError, TypeError, ValueError): - pass - - if record.exc_info: - # Cache the traceback text to avoid converting it multiple times - # (it's constant anyway) - # from logging.Formatter:format - if not record.exc_text: - record.exc_text = self.formatException(record.exc_info) - - if record.exc_text: - log_dict["exception"] = record.exc_text - - json_record = json.dumps(log_dict, default=self.default_json_formatter) - - if hasattr(json_record, "decode"): # pragma: no cover - json_record = json_record.decode("utf-8") - - return json_record - - def _is_cold_start() -> bool: """Verifies whether is cold start From f11c4194adf5e1e91eac1e48c69d885849ec764b Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Sun, 14 Jun 2020 15:40:51 +0100 Subject: [PATCH 5/9] chore: cleanup metrics tests --- tests/functional/test_metrics.py | 324 ++++++++++++------------------- 1 file changed, 122 insertions(+), 202 deletions(-) diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 244a56119cd..efc93daa739 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -52,17 +52,18 @@ def non_str_dimensions() -> List[Dict[str, Any]]: @pytest.fixture -def namespace() -> Dict[str, str]: +def namespace() -> str: return "test_namespace" @pytest.fixture -def a_hundred_metrics(namespace=namespace) -> List[Dict[str, str]]: - metrics = [] - for i in range(100): - metrics.append({"name": f"metric_{i}", "unit": "Count", "value": 1}) +def service() -> str: + return "test_service" + - return metrics +@pytest.fixture +def a_hundred_metrics(namespace=namespace) -> List[Dict[str, str]]: + return [{"name": f"metric_{i}", "unit": "Count", "value": 1} for i in range(100)] def serialize_metrics(metrics: List[Dict], dimensions: List[Dict], namespace: str) -> Dict: @@ -92,21 +93,23 @@ def remove_timestamp(metrics: List): del metric["_aws"]["Timestamp"] -def test_single_metric_one_metric_only(capsys, metric, dimension, namespace): - # GIVEN we attempt to add more than one metric +def capture_metrics_output(capsys): + return json.loads(capsys.readouterr().out.strip()) + + +def test_single_metric_logs_one_metric_only(capsys, metric, dimension, namespace): + # GIVEN we try adding more than one metric # WHEN using single_metric context manager with single_metric(namespace=namespace, **metric) as my_metric: my_metric.add_metric(name="second_metric", unit="Count", value=1) - my_metric.add_metric(name="third_metric", unit="Seconds", value=1) my_metric.add_dimension(**dimension) - output = json.loads(capsys.readouterr().out.strip()) + output = capture_metrics_output(capsys) expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) - remove_timestamp(metrics=[output, expected]) # Timestamp will always be different - # THEN we should only have the first metric added - assert expected["_aws"] == output["_aws"] + remove_timestamp(metrics=[output, expected]) + assert expected == output def test_log_metrics(capsys, metrics, dimensions, namespace): @@ -121,65 +124,48 @@ def test_log_metrics(capsys, metrics, dimensions, 
namespace): # and flush all metrics at the end of a function execution @my_metrics.log_metrics def lambda_handler(evt, ctx): - return True + pass lambda_handler({}, {}) - - output = json.loads(capsys.readouterr().out.strip()) + output = capture_metrics_output(capsys) expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace) - remove_timestamp(metrics=[output, expected]) # Timestamp will always be different - # THEN we should have no exceptions - # and a valid EMF object should've been flushed correctly - assert expected["_aws"] == output["_aws"] - for dimension in dimensions: - assert dimension["name"] in output + # and a valid EMF object should be flushed correctly + remove_timestamp(metrics=[output, expected]) + assert expected == output def test_namespace_env_var(monkeypatch, capsys, metric, dimension, namespace): - # GIVEN we use POWERTOOLS_METRICS_NAMESPACE + # GIVEN POWERTOOLS_METRICS_NAMESPACE is set monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace) - # WHEN creating a metric but don't explicitly - # add a namespace - with single_metric(**metric) as my_metrics: - my_metrics.add_dimension(**dimension) - monkeypatch.delenv("POWERTOOLS_METRICS_NAMESPACE") + # WHEN creating a metric without explicitly adding a namespace + with single_metric(**metric) as my_metric: + my_metric.add_dimension(**dimension) - output = json.loads(capsys.readouterr().out.strip()) + output = capture_metrics_output(capsys) expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) - remove_timestamp(metrics=[output, expected]) # Timestamp will always be different - - # THEN we should add a namespace implicitly - # with the value of POWERTOOLS_METRICS_NAMESPACE env var - assert expected["_aws"] == output["_aws"] + # THEN we should add a namespace using POWERTOOLS_METRICS_NAMESPACE env var value + remove_timestamp(metrics=[output, expected]) + assert expected == output def test_service_env_var(monkeypatch, capsys, metric, namespace): # GIVEN we use POWERTOOLS_SERVICE_NAME monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", "test_service") - my_metrics = Metrics(namespace=namespace) - - # WHEN creating a metric but don't explicitly - # add a dimension - @my_metrics.log_metrics - def lambda_handler(evt, context): - my_metrics.add_metric(**metric) - return True - lambda_handler({}, {}) - - monkeypatch.delenv("POWERTOOLS_SERVICE_NAME") + # WHEN creating a metric without explicitly adding a dimension + with single_metric(**metric, namespace=namespace): + pass - output = json.loads(capsys.readouterr().out.strip()) + output = capture_metrics_output(capsys) expected_dimension = {"name": "service", "value": "test_service"} expected = serialize_single_metric(metric=metric, dimension=expected_dimension, namespace=namespace) - remove_timestamp(metrics=[output, expected]) # Timestamp will always be different - - # THEN metrics should be logged using the implicitly created "service" dimension + # THEN a metric should be logged using the implicitly created "service" dimension + remove_timestamp(metrics=[output, expected]) assert expected == output @@ -194,7 +180,7 @@ def test_metrics_spillover(monkeypatch, capsys, metric, dimension, namespace, a_ # THEN it should serialize and flush all metrics at the 100th # and clear all metrics and dimensions from memory - output = json.loads(capsys.readouterr().out.strip()) + output = capture_metrics_output(capsys) spillover_metrics = output["_aws"]["CloudWatchMetrics"][0]["Metrics"] assert my_metrics.metric_set == {} 
assert len(spillover_metrics) == 100 @@ -206,87 +192,75 @@ def test_metrics_spillover(monkeypatch, capsys, metric, dimension, namespace, a_ # THEN serializing the 101th metric should # create a new EMF object with a single metric in it (101th) - # and contain have the same dimension we previously added + # and contain the same dimension we previously added serialized_101th_metric = my_metrics.serialize_metric_set() expected_101th_metric = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) remove_timestamp(metrics=[serialized_101th_metric, expected_101th_metric]) + assert serialized_101th_metric == expected_101th_metric - assert serialized_101th_metric["_aws"] == expected_101th_metric["_aws"] - -def test_log_metrics_should_invoke_function(metric, dimension, namespace): +def test_log_metrics_decorator_call_decorated_function(metric, namespace, service): # GIVEN Metrics is initialized - my_metrics = Metrics(namespace=namespace) + my_metrics = Metrics(service=service, namespace=namespace) # WHEN log_metrics is used to serialize metrics @my_metrics.log_metrics def lambda_handler(evt, context): - my_metrics.add_metric(**metric) - my_metrics.add_dimension(**dimension) return True # THEN log_metrics should invoke the function it decorates - # and return no error if we have a metric, namespace, and a dimension - lambda_handler({}, {}) + # and return no error if we have a namespace and dimension + assert lambda_handler({}, {}) is True -def test_incorrect_metric_unit(metric, dimension, namespace): - # GIVEN we pass a metric unit not supported by CloudWatch +def test_schema_validation_incorrect_metric_unit(metric, dimension, namespace): + # GIVEN we pass a metric unit that is not supported by CloudWatch metric["unit"] = "incorrect_unit" - # WHEN we attempt to add a new metric - # THEN it should fail validation and raise MetricUnitError + # WHEN we try adding a new metric + # THEN it should fail metric unit validation with pytest.raises(MetricUnitError): with single_metric(**metric) as my_metric: my_metric.add_dimension(**dimension) -def test_schema_no_namespace(metric, dimension): - # GIVEN we add any metric or dimension - # but no namespace - +def test_schema_validation_no_namespace(metric, dimension): + # GIVEN we don't add any namespace # WHEN we attempt to serialize a valid EMF object - # THEN it should fail validation and raise SchemaValidationError - with pytest.raises(SchemaValidationError): - with single_metric(**metric) as my_metric: - my_metric.add_dimension(**dimension) + # THEN it should fail namespace validation + with pytest.raises(SchemaValidationError, match=".*Namespace must be string"): + with single_metric(**metric): + pass -def test_schema_incorrect_value(metric, dimension, namespace): - # GIVEN we pass an incorrect metric value (non-number/float) +def test_schema_validation_incorrect_metric_value(metric, dimension, namespace): + # GIVEN we pass an incorrect metric value (non-numeric) metric["value"] = "some_value" # WHEN we attempt to serialize a valid EMF object # THEN it should fail validation and raise SchemaValidationError with pytest.raises(MetricValueError): - with single_metric(**metric) as my_metric: - my_metric.add_dimension(**dimension) + with single_metric(**metric): + pass -def test_schema_no_metrics(dimensions, namespace): +def test_schema_no_metrics(service, namespace): # GIVEN Metrics is initialized - my_metrics = Metrics(namespace=namespace) - - # WHEN no metrics have been added - # but a namespace and dimensions only - for dimension 
in dimensions:
-        my_metrics.add_dimension(**dimension)
+    my_metrics = Metrics(service=service, namespace=namespace)
 
     # THEN it should fail validation and raise SchemaValidationError
-    with pytest.raises(SchemaValidationError):
+    with pytest.raises(SchemaValidationError, match=".*Metrics must contain at least 1 items"):
         my_metrics.serialize_metric_set()
 
 
 def test_exceed_number_of_dimensions(metric, namespace):
     # GIVEN we have more dimensions than CloudWatch supports
-    dimensions = []
-    for i in range(11):
-        dimensions.append({"name": f"test_{i}", "value": "test"})
+    dimensions = [{"name": f"test_{i}", "value": "test"} for i in range(11)]
 
     # WHEN we attempt to serialize them into a valid EMF object
     # THEN it should fail validation and raise SchemaValidationError
-    with pytest.raises(SchemaValidationError):
-        with single_metric(**metric) as my_metric:
+    with pytest.raises(SchemaValidationError, match="must contain less than or equal to 9 items"):
+        with single_metric(**metric, namespace=namespace) as my_metric:
             for dimension in dimensions:
                 my_metric.add_dimension(**dimension)
@@ -294,9 +268,8 @@ def test_exceed_number_of_dimensions(metric, namespace):
 def test_log_metrics_during_exception(capsys, metric, dimension, namespace):
     # GIVEN Metrics is initialized
     my_metrics = Metrics(namespace=namespace)
-
-    my_metrics.add_metric(**metric)
     my_metrics.add_dimension(**dimension)
+    my_metrics.add_metric(**metric)
 
     # WHEN log_metrics is used to serialize metrics
     # but an error has been raised during handler execution
@@ -307,31 +280,30 @@ def lambda_handler(evt, context):
     with pytest.raises(ValueError):
         lambda_handler({}, {})
 
-    output = json.loads(capsys.readouterr().out.strip())
+    output = capture_metrics_output(capsys)
     expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace)
 
-    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
-    # THEN we should log metrics and propagate the exception up
-    assert expected["_aws"] == output["_aws"]
+    # THEN we should log metrics either way
+    remove_timestamp(metrics=[output, expected])
+    assert expected == output
 
 
 def test_log_metrics_raise_on_empty_metrics(capsys, metric, dimension, namespace):
     # GIVEN Metrics is initialized
     my_metrics = Metrics(service="test_service", namespace=namespace)
 
+    # WHEN log_metrics is used with raise_on_empty_metrics param and has no metrics
     @my_metrics.log_metrics(raise_on_empty_metrics=True)
     def lambda_handler(evt, context):
-        # WHEN log_metrics is used with raise_on_empty_metrics param and has no metrics
-        return True
+        pass
 
     # THEN the raised exception should be SchemaValidationError
     # and specifically about the lack of Metrics
-    with pytest.raises(SchemaValidationError, match="_aws\.CloudWatchMetrics\[0\]\.Metrics"):  # noqa: W605
+    with pytest.raises(SchemaValidationError, match=".*Metrics must contain at least 1 items"):
         lambda_handler({}, {})
 
 
 def test_all_possible_metric_units(metric, dimension, namespace):
-    # GIVEN we add a metric for each metric unit supported by CloudWatch
     # where metric unit as MetricUnit key e.g. "Seconds", "BytesPerSecond"
     for unit in MetricUnit:
@@ -344,9 +316,8 @@ def test_all_possible_metric_units(metric, dimension, namespace):
     # WHEN we iterate over all available metric unit keys from MetricUnit enum
     all_metric_units = [unit.value for unit in MetricUnit]
 
-    # metric unit as MetricUnit value e.g. "Seconds", "Bytes/Second"
     for unit in all_metric_units:
-        metric["unit"] = unit
+        metric["unit"] = unit  # e.g. "Seconds", "Bytes/Second"
         # THEN we raise no MetricUnitError nor SchemaValidationError
         with single_metric(namespace=namespace, **metric) as my_metric:
             my_metric.add_dimension(**dimension)
@@ -364,17 +335,15 @@ def test_metrics_reuse_metric_set(metric, dimension, namespace):
     assert my_metrics_2.metric_set == my_metrics.metric_set
 
 
-def test_log_metrics_clear_metrics_after_invocation(metric, dimension, namespace):
+def test_log_metrics_clear_metrics_after_invocation(metric, service, namespace):
     # GIVEN Metrics is initialized
-    my_metrics = Metrics(namespace=namespace)
-
+    my_metrics = Metrics(service=service, namespace=namespace)
     my_metrics.add_metric(**metric)
-    my_metrics.add_dimension(**dimension)
 
     # WHEN log_metrics is used to flush metrics from memory
     @my_metrics.log_metrics
     def lambda_handler(evt, context):
-        return True
+        pass
 
     lambda_handler({}, {})
 
@@ -382,11 +351,10 @@ def lambda_handler(evt, context):
     assert my_metrics.metric_set == {}
 
 
-def test_log_metrics_non_string_dimension_values(capsys, metrics, non_str_dimensions, namespace):
+def test_log_metrics_non_string_dimension_values(capsys, service, metric, non_str_dimensions, namespace):
     # GIVEN Metrics is initialized and dimensions with non-string values are added
-    my_metrics = Metrics(namespace=namespace)
-    for metric in metrics:
-        my_metrics.add_metric(**metric)
+    my_metrics = Metrics(service=service, namespace=namespace)
+    my_metrics.add_metric(**metric)
     for dimension in non_str_dimensions:
         my_metrics.add_dimension(**dimension)
@@ -394,179 +362,137 @@ def test_log_metrics_non_string_dimens
     # WHEN we utilize log_metrics to serialize
     # and flush all metrics at the end of a function execution
     @my_metrics.log_metrics
     def lambda_handler(evt, ctx):
-        return True
+        pass
 
     lambda_handler({}, {})
-    output = json.loads(capsys.readouterr().out.strip())
+    output = capture_metrics_output(capsys)
 
     # THEN we should have no exceptions
-    # and dimension values hould be serialized as strings
+    # and dimension values should be serialized as strings
     for dimension in non_str_dimensions:
         assert isinstance(output[dimension["name"]], str)
 
 
-def test_log_metrics_with_explicit_namespace(capsys, metrics, dimensions, namespace):
-    # GIVEN Metrics is initialized with service specified
-    my_metrics = Metrics(service="test_service", namespace=namespace)
-    for metric in metrics:
-        my_metrics.add_metric(**metric)
-    for dimension in dimensions:
-        my_metrics.add_dimension(**dimension)
+def test_log_metrics_with_explicit_namespace(capsys, metric, service, namespace):
+    # GIVEN Metrics is initialized with explicit namespace
+    my_metrics = Metrics(service=service, namespace=namespace)
+    my_metrics.add_metric(**metric)
 
     # WHEN we utilize log_metrics to serialize
     # and flush all metrics at the end of a function execution
     @my_metrics.log_metrics
     def lambda_handler(evt, ctx):
-        return True
+        pass
 
     lambda_handler({}, {})
-    output = json.loads(capsys.readouterr().out.strip())
+    output = capture_metrics_output(capsys)
 
-    dimensions.append({"name": "service", "value": "test_service"})
-    expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace)
+    # THEN we should have no exceptions and the namespace should be set
+    # using the namespace value passed to the Metrics constructor
+    assert namespace == output["_aws"]["CloudWatchMetrics"][0]["Namespace"]
 
-    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
-    # THEN we should have no exceptions and the namespace should be set to the name provided in the
-    # service passed to Metrics 
constructor - assert expected == output - - -def test_log_metrics_with_implicit_dimensions(capsys, metrics, namespace): +def test_log_metrics_with_implicit_dimensions(capsys, metric, namespace, service): # GIVEN Metrics is initialized with service specified - my_metrics = Metrics(service="test_service", namespace=namespace) - for metric in metrics: - my_metrics.add_metric(**metric) + my_metrics = Metrics(service=service, namespace=namespace) + my_metrics.add_metric(**metric) # WHEN we utilize log_metrics to serialize and don't explicitly add any dimensions @my_metrics.log_metrics def lambda_handler(evt, ctx): - return True + pass lambda_handler({}, {}) - output = json.loads(capsys.readouterr().out.strip()) - - expected_dimensions = [{"name": "service", "value": "test_service"}] - expected = serialize_metrics(metrics=metrics, dimensions=expected_dimensions, namespace=namespace) - - remove_timestamp(metrics=[output, expected]) # Timestamp will always be different + output = capture_metrics_output(capsys) # THEN we should have no exceptions and the dimensions should be set to the name provided in the # service passed to Metrics constructor - assert expected == output + assert service == output["service"] -def test_log_metrics_with_renamed_service(capsys, metrics, metric): +def test_log_metrics_with_renamed_service(capsys, metric, service): # GIVEN Metrics is initialized with service specified - my_metrics = Metrics(service="test_service", namespace="test_application") - for metric in metrics: - my_metrics.add_metric(**metric) + my_metrics = Metrics(service=service, namespace="test_application") + another_service_dimension = {"name": "service", "value": "another_test_service"} @my_metrics.log_metrics def lambda_handler(evt, ctx): # WHEN we manually call add_dimension to change the value of the service dimension - my_metrics.add_dimension(name="service", value="another_test_service") + my_metrics.add_dimension(**another_service_dimension) my_metrics.add_metric(**metric) - return True lambda_handler({}, {}) + output = capture_metrics_output(capsys) - output = json.loads(capsys.readouterr().out.strip()) lambda_handler({}, {}) - second_output = json.loads(capsys.readouterr().out.strip()) - - remove_timestamp(metrics=[output]) # Timestamp will always be different + second_output = capture_metrics_output(capsys) # THEN we should have no exceptions and the dimensions should be set to the name provided in the # add_dimension call - assert output["service"] == "another_test_service" - assert second_output["service"] == "another_test_service" - - -def test_single_metric_with_service(capsys, metric, dimension, namespace): - # GIVEN we pass namespace parameter to single_metric - - # WHEN creating a metric - with single_metric(**metric, namespace=namespace) as my_metrics: - my_metrics.add_dimension(**dimension) - - output = json.loads(capsys.readouterr().out.strip()) - expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) - - remove_timestamp(metrics=[output, expected]) # Timestamp will always be different - - # THEN namespace should match value passed as service - assert expected["_aws"] == output["_aws"] + assert output["service"] == another_service_dimension["value"] + assert second_output["service"] == another_service_dimension["value"] def test_namespace_var_precedence(monkeypatch, capsys, metric, dimension, namespace): # GIVEN we use POWERTOOLS_METRICS_NAMESPACE - monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace) + 
monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", "a_namespace") # WHEN creating a metric and explicitly set a namespace with single_metric(namespace=namespace, **metric) as my_metrics: my_metrics.add_dimension(**dimension) - monkeypatch.delenv("POWERTOOLS_METRICS_NAMESPACE") - output = json.loads(capsys.readouterr().out.strip()) - expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) - - remove_timestamp(metrics=[output, expected]) # Timestamp will always be different + output = capture_metrics_output(capsys) # THEN namespace should match the explicitly passed variable and not the env var - assert expected["_aws"] == output["_aws"] + assert namespace == output["_aws"]["CloudWatchMetrics"][0]["Namespace"] -def test_emit_cold_start_metric(capsys, namespace): +def test_log_metrics_capture_cold_start_metric(capsys, namespace, service): # GIVEN Metrics is initialized - my_metrics = Metrics(service="test_service", namespace=namespace) + my_metrics = Metrics(service=service, namespace=namespace) # WHEN log_metrics is used with capture_cold_start_metric @my_metrics.log_metrics(capture_cold_start_metric=True) def lambda_handler(evt, context): - return True + pass LambdaContext = namedtuple("LambdaContext", "function_name") lambda_handler({}, LambdaContext("example_fn")) - output = json.loads(capsys.readouterr().out.strip()) + output = capture_metrics_output(capsys) # THEN ColdStart metric and function_name dimension should be logged assert output["ColdStart"] == 1 assert output["function_name"] == "example_fn" -def test_emit_cold_start_metric_only_once(capsys, namespace, dimension, metric): +def test_emit_cold_start_metric_only_once(capsys, namespace, service, metric): # GIVEN Metrics is initialized - my_metrics = Metrics(namespace=namespace) + my_metrics = Metrics(service=service, namespace=namespace) # WHEN log_metrics is used with capture_cold_start_metric # and handler is called more than once @my_metrics.log_metrics(capture_cold_start_metric=True) def lambda_handler(evt, context): my_metrics.add_metric(**metric) - my_metrics.add_dimension(**dimension) LambdaContext = namedtuple("LambdaContext", "function_name") lambda_handler({}, LambdaContext("example_fn")) - capsys.readouterr().out.strip() + _ = capture_metrics_output(capsys) # ignore first stdout captured - # THEN ColdStart metric and function_name dimension should be logged - # only once + # THEN ColdStart metric and function_name dimension should be logged once lambda_handler({}, LambdaContext("example_fn")) - - output = json.loads(capsys.readouterr().out.strip()) + output = capture_metrics_output(capsys) assert "ColdStart" not in output - assert "function_name" not in output -def test_log_metrics_decorator_no_metrics(dimensions, namespace): +def test_log_metrics_decorator_no_metrics_warning(dimensions, namespace, service): # GIVEN Metrics is initialized - my_metrics = Metrics(namespace=namespace, service="test_service") + my_metrics = Metrics(namespace=namespace, service=service) # WHEN using the log_metrics decorator and no metrics have been added @my_metrics.log_metrics @@ -580,28 +506,22 @@ def lambda_handler(evt, context): assert str(w[-1].message) == "No metrics to publish, skipping" -def test_log_metrics_with_implicit_dimensions_called_twice(capsys, metrics, namespace): +def test_log_metrics_with_implicit_dimensions_called_twice(capsys, metric, namespace, service): # GIVEN Metrics is initialized with service specified - my_metrics = Metrics(service="test_service", namespace=namespace) + 
my_metrics = Metrics(service=service, namespace=namespace) # WHEN we utilize log_metrics to serialize and don't explicitly add any dimensions, # and the lambda function is called more than once @my_metrics.log_metrics def lambda_handler(evt, ctx): - for metric in metrics: - my_metrics.add_metric(**metric) + my_metrics.add_metric(**metric) return True lambda_handler({}, {}) - output = json.loads(capsys.readouterr().out.strip()) + output = capture_metrics_output(capsys) lambda_handler({}, {}) - second_output = json.loads(capsys.readouterr().out.strip()) - - expected_dimensions = [{"name": "service", "value": "test_service"}] - expected = serialize_metrics(metrics=metrics, dimensions=expected_dimensions, namespace=namespace) - - remove_timestamp(metrics=[output, expected, second_output]) # Timestamp will always be different + second_output = capture_metrics_output(capsys) # THEN we should have no exceptions and the dimensions should be set to the name provided in the # service passed to Metrics constructor From 72f6b3bbbf9ee7982410cb9b256e0177bf3d407b Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Sun, 14 Jun 2020 15:56:16 +0100 Subject: [PATCH 6/9] chore: cleanup tracer tests --- tests/functional/test_tracing.py | 73 +++++++++++++++++--------------- 1 file changed, 39 insertions(+), 34 deletions(-) diff --git a/tests/functional/test_tracing.py b/tests/functional/test_tracing.py index 6cff182015e..cda0a85cc4d 100644 --- a/tests/functional/test_tracing.py +++ b/tests/functional/test_tracing.py @@ -14,120 +14,125 @@ def reset_tracing_config(): yield +@pytest.fixture +def service_name(): + return "booking" + + def test_capture_lambda_handler(dummy_response): - # GIVEN tracer is disabled, and decorator is used - # WHEN a lambda handler is run - # THEN tracer should not raise an Exception + # GIVEN tracer lambda handler decorator is used tracer = Tracer(disabled=True) + # WHEN a lambda handler is run @tracer.capture_lambda_handler def handler(event, context): return dummy_response + # THEN tracer should not raise an Exception handler({}, {}) def test_capture_method(dummy_response): - # GIVEN tracer is disabled, and method decorator is used - # WHEN a function is run - # THEN tracer should not raise an Exception - + # GIVEN tracer method decorator is used tracer = Tracer(disabled=True) + # WHEN a function is run @tracer.capture_method def greeting(name, message): return dummy_response + # THEN tracer should not raise an Exception greeting(name="Foo", message="Bar") def test_tracer_lambda_emulator(monkeypatch, dummy_response): - # GIVEN tracer is run locally - # WHEN a lambda function is run through SAM CLI - # THEN tracer should not raise an Exception + # GIVEN tracer runs locally monkeypatch.setenv("AWS_SAM_LOCAL", "true") tracer = Tracer() + # WHEN a lambda function is run through SAM CLI @tracer.capture_lambda_handler def handler(event, context): return dummy_response + # THEN tracer should run in disabled mode, and not raise an Exception handler({}, {}) - monkeypatch.delenv("AWS_SAM_LOCAL") def test_tracer_metadata_disabled(dummy_response): # GIVEN tracer is disabled, and annotations/metadata are used - # WHEN a lambda handler is run - # THEN tracer should not raise an Exception and simply ignore tracer = Tracer(disabled=True) + # WHEN a lambda handler is run @tracer.capture_lambda_handler def handler(event, context): tracer.put_annotation("PaymentStatus", "SUCCESS") tracer.put_metadata("PaymentMetadata", "Metadata") return dummy_response + # THEN tracer should not raise any Exception 
     handler({}, {})
 
 
-def test_tracer_env_vars(monkeypatch):
-    # GIVEN tracer disabled, is run without parameters
-    # WHEN service is explicitly defined
-    # THEN tracer should have use that service name
-    service_name = "booking"
+def test_tracer_service_env_var(monkeypatch, service_name):
+    # GIVEN tracer is run without parameters
+    # WHEN service is implicitly defined via env var
     monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", service_name)
-    tracer_env_var = Tracer(disabled=True)
+    tracer = Tracer(disabled=True)
+
+    # THEN tracer should use that service name
+    assert tracer.service == service_name
 
-    assert tracer_env_var.service == service_name
 
+def test_tracer_explicit_service(monkeypatch, service_name):
+    # GIVEN tracer is disabled
+    # WHEN service is explicitly defined
     tracer_explicit = Tracer(disabled=True, service=service_name)
     assert tracer_explicit.service == service_name
 
     monkeypatch.setenv("POWERTOOLS_TRACE_DISABLED", "true")
     tracer = Tracer()
-    assert bool(tracer.disabled) is True
 
+    # THEN tracer should use that service name
+    assert tracer.service == service_name
 
-def test_tracer_with_exception(mocker):
-    # GIVEN tracer is disabled, decorator is used
-    # WHEN a lambda handler or method returns an Exception
-    # THEN tracer should reraise the same Exception
+def test_tracer_propagate_exception(mocker):
+    # GIVEN tracer decorator is used
     class CustomException(Exception):
         pass
 
     tracer = Tracer(disabled=True)
 
+    # WHEN a lambda handler or method raises an Exception
     @tracer.capture_lambda_handler
     def handler(event, context):
         raise CustomException("test")
 
     @tracer.capture_method
-    def greeting(name, message):
+    def greeting():
         raise CustomException("test")
 
+    # THEN tracer should reraise the same Exception
     with pytest.raises(CustomException):
         handler({}, {})
 
     with pytest.raises(CustomException):
-        greeting(name="Foo", message="Bar")
+        greeting()
 
 
-def test_tracer_reuse():
-    # GIVEN tracer A, B were initialized
-    # WHEN tracer B explicitly reuses A config
-    # THEN tracer B attributes should be equal to tracer A
-    service_name = "booking"
+def test_tracer_reuse_configuration(service_name):
+    # GIVEN tracer A is initialized
    tracer_a = Tracer(disabled=True, service=service_name)
+
+    # WHEN tracer B is initialized afterwards
     tracer_b = Tracer()
 
-    assert id(tracer_a) != id(tracer_b)
+    # THEN tracer B attributes should be equal to tracer A
     assert tracer_a.__dict__.items() == tracer_b.__dict__.items()
 
 
 def test_tracer_method_nested_sync(mocker):
-    # GIVEN tracer is disabled, decorator is used
+    # GIVEN tracer decorator is used
     # WHEN multiple sync functions are nested
     # THEN tracer should not raise a Runtime Error
     tracer = Tracer(disabled=True)

From 8469fe82affbf144bf2058ee15b1385fa3f31522 Mon Sep 17 00:00:00 2001
From: heitorlessa
Date: Mon, 15 Jun 2020 11:25:10 +0100
Subject: [PATCH 7/9] chore: cleanup logger tests

---
 aws_lambda_powertools/logging/formatter.py |   2 +-
 tests/functional/test_logger.py            | 217 ++++++++++-----------
 2 files changed, 106 insertions(+), 113 deletions(-)

diff --git a/aws_lambda_powertools/logging/formatter.py b/aws_lambda_powertools/logging/formatter.py
index 8aa07069f97..7b38524b6d1 100644
--- a/aws_lambda_powertools/logging/formatter.py
+++ b/aws_lambda_powertools/logging/formatter.py
@@ -85,7 +85,7 @@ def format(self, record):  # noqa: A003
         # Cache the traceback text to avoid converting it multiple times
         # (it's constant anyway)
         # from logging.Formatter:format
-        if not record.exc_text:
+        if not record.exc_text:  # pragma: no cover
             record.exc_text = self.formatException(record.exc_info)
 
         if record.exc_text:
diff --git a/tests/functional/test_logger.py b/tests/functional/test_logger.py
index 6b6a4bb6dde..211f12f8fc1 100644
--- a/tests/functional/test_logger.py
+++ b/tests/functional/test_logger.py
@@ -7,7 +7,7 @@
 
 from aws_lambda_powertools import Logger, Tracer
 from aws_lambda_powertools.logging.exceptions import InvalidLoggerSamplingRateError
-from aws_lambda_powertools.logging.logger import JsonFormatter, set_package_logger
+from aws_lambda_powertools.logging.logger import set_package_logger
 
 
 @pytest.fixture
@@ -39,227 +39,220 @@ def lambda_context():
     return namedtuple("LambdaContext", lambda_context.keys())(*lambda_context.values())
 
 
+@pytest.fixture
+def lambda_event():
+    return {"greeting": "hello"}
+
+
+def capture_logging_output(stdout):
+    return json.loads(stdout.getvalue())
+
+
+def capture_multiple_logging_statements_output(stdout):
+    return [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
+
+
 def test_setup_service_name(root_logger, stdout):
-    # GIVEN service is explicitly defined
-    # WHEN logger is setup
-    # THEN service field should be equals service given
     service_name = "payment"
+    # GIVEN Logger is initialized
+    # WHEN service is explicitly defined
     logger = Logger(service=service_name, stream=stdout)
 
     logger.info("Hello")
 
-    log = json.loads(stdout.getvalue())
+    # THEN service field should equal the service given
+    log = capture_logging_output(stdout)
     assert service_name == log["service"]
 
 
 def test_setup_no_service_name(stdout):
-    # GIVEN no service is explicitly defined
-    # WHEN logger is setup
-    # THEN service field should be "service_undefined"
+    # GIVEN Logger is initialized
+    # WHEN no service is explicitly defined
     logger = Logger(stream=stdout)
+
     logger.info("Hello")
 
-    log = json.loads(stdout.getvalue())
+    # THEN service field should be "service_undefined"
+    log = capture_logging_output(stdout)
     assert "service_undefined" == log["service"]
 
 
 def test_setup_service_env_var(monkeypatch, stdout):
-    # GIVEN service is explicitly defined via POWERTOOLS_SERVICE_NAME env
-    # WHEN logger is setup
-    # THEN service field should be equals POWERTOOLS_SERVICE_NAME value
     service_name = "payment"
+    # GIVEN Logger is initialized
+    # WHEN service is explicitly defined via POWERTOOLS_SERVICE_NAME env
     monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", service_name)
-
     logger = Logger(stream=stdout)
+
     logger.info("Hello")
 
-    log = json.loads(stdout.getvalue())
+    # THEN service field should equal the POWERTOOLS_SERVICE_NAME value
+    log = capture_logging_output(stdout)
     assert service_name == log["service"]
 
 
-def test_setup_sampling_rate(monkeypatch, stdout):
-    # GIVEN samping rate is explicitly defined via POWERTOOLS_LOGGER_SAMPLE_RATE env
-    # WHEN logger is setup
-    # THEN sampling rate should be equals POWERTOOLS_LOGGER_SAMPLE_RATE value and should sample debug logs
-
+def test_setup_sampling_rate_env_var(monkeypatch, stdout):
+    # GIVEN Logger is initialized
+    # WHEN sampling rate is explicitly set to 100% via POWERTOOLS_LOGGER_SAMPLE_RATE env
     sampling_rate = "1"
     monkeypatch.setenv("POWERTOOLS_LOGGER_SAMPLE_RATE", sampling_rate)
-    monkeypatch.setenv("LOG_LEVEL", "INFO")
-
-    logger = Logger(stream=stdout)
+    logger = Logger(stream=stdout, level="INFO")
     logger.debug("I am being sampled")
 
-    log = json.loads(stdout.getvalue())
+    # THEN sampling rate should equal the POWERTOOLS_LOGGER_SAMPLE_RATE value
+    # log level should be DEBUG
+    # and debug log statements should be in stdout
+    log = capture_logging_output(stdout)
     assert 
sampling_rate == log["sampling_rate"] assert "DEBUG" == log["level"] assert "I am being sampled" == log["message"] def test_inject_lambda_context(lambda_context, stdout): - # GIVEN a lambda function is decorated with logger - # WHEN logger is setup - # THEN lambda contextual info should always be in the logs - logger_context_keys = ( - "function_name", - "function_memory_size", - "function_arn", - "function_request_id", - ) - + # GIVEN Logger is initialized logger = Logger(stream=stdout) + # WHEN a lambda function is decorated with logger @logger.inject_lambda_context def handler(event, context): logger.info("Hello") handler({}, lambda_context) - log = json.loads(stdout.getvalue()) - - for key in logger_context_keys: + # THEN lambda contextual info should always be in the logs + log = capture_logging_output(stdout) + expected_logger_context_keys = ( + "function_name", + "function_memory_size", + "function_arn", + "function_request_id", + ) + for key in expected_logger_context_keys: assert key in log -def test_inject_lambda_context_log_event_request(lambda_context, stdout): - # GIVEN a lambda function is decorated with logger instructed to log event - # WHEN logger is setup - # THEN logger should log event received from Lambda - lambda_event = {"greeting": "hello"} - +def test_inject_lambda_context_log_event_request(lambda_context, stdout, lambda_event): + # GIVEN Logger is initialized logger = Logger(stream=stdout) + # WHEN a lambda function is decorated with logger instructed to log event @logger.inject_lambda_context(log_event=True) - # @logger.inject_lambda_context(log_event=True) def handler(event, context): logger.info("Hello") handler(lambda_event, lambda_context) - # Given that our string buffer has many log statements separated by newline \n - # We need to clean it before we can assert on - logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line] - logged_event, _ = logs - assert "greeting" in logged_event["message"] + # THEN logger should log event received from Lambda + logged_event, _ = capture_multiple_logging_statements_output(stdout) + assert logged_event["message"] == lambda_event -def test_inject_lambda_context_log_event_request_env_var(monkeypatch, lambda_context, stdout): - # GIVEN a lambda function is decorated with logger instructed to log event - # via POWERTOOLS_LOGGER_LOG_EVENT env - # WHEN logger is setup - # THEN logger should log event received from Lambda - lambda_event = {"greeting": "hello"} +def test_inject_lambda_context_log_event_request_env_var(monkeypatch, lambda_context, stdout, lambda_event): + # GIVEN Logger is initialized monkeypatch.setenv("POWERTOOLS_LOGGER_LOG_EVENT", "true") - logger = Logger(stream=stdout) + # WHEN a lambda function is decorated with logger instructed to log event + # via POWERTOOLS_LOGGER_LOG_EVENT env @logger.inject_lambda_context def handler(event, context): logger.info("Hello") handler(lambda_event, lambda_context) - # Given that our string buffer has many log statements separated by newline \n - # We need to clean it before we can assert on - logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line] - - event = {} - for log in logs: - if "greeting" in log["message"]: - event = log["message"] - - assert event == lambda_event - + # THEN logger should log event received from Lambda + logged_event, _ = capture_multiple_logging_statements_output(stdout) + assert logged_event["message"] == lambda_event -def test_inject_lambda_context_log_no_request_by_default(monkeypatch, 
lambda_context, stdout): - # GIVEN a lambda function is decorated with logger - # WHEN logger is setup - # THEN logger should not log event received by lambda handler - lambda_event = {"greeting": "hello"} +def test_inject_lambda_context_log_no_request_by_default(monkeypatch, lambda_context, stdout, lambda_event): + # GIVEN Logger is initialized logger = Logger(stream=stdout) + # WHEN a lambda function is decorated with logger @logger.inject_lambda_context def handler(event, context): logger.info("Hello") handler(lambda_event, lambda_context) - # Given that our string buffer has many log statements separated by newline \n - # We need to clean it before we can assert on - logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line] - - event = {} - for log in logs: - if "greeting" in log["message"]: - event = log["message"] - - assert event != lambda_event + # THEN logger should not log event received by lambda handler + log = capture_logging_output(stdout) + assert log["message"] != lambda_event def test_inject_lambda_cold_start(lambda_context, stdout): - # GIVEN a lambda function is decorated with logger, and called twice - # WHEN logger is setup - # THEN cold_start key should only be true in the first call - + # cold_start can be false as it's a global variable in Logger module + # so we reset it to simulate the correct behaviour + # since Lambda will only import our logger lib once per concurrent execution from aws_lambda_powertools.logging import logger - # # As we run tests in parallel global cold_start value can be false - # # here we reset to simulate the correct behaviour - # # since Lambda will only import our logger lib once per concurrent execution logger.is_cold_start = True + # GIVEN Logger is initialized logger = Logger(stream=stdout) - def custom_method(): - logger.info("Hello from method") - + # WHEN a lambda function is decorated with logger, and called twice @logger.inject_lambda_context def handler(event, context): - custom_method() logger.info("Hello") handler({}, lambda_context) handler({}, lambda_context) - # Given that our string buffer has many log statements separated by newline \n - # We need to clean it before we can assert on - logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line] - first_log, second_log, third_log, fourth_log = logs - - # First execution + # THEN cold_start key should only be true in the first call + first_log, second_log = capture_multiple_logging_statements_output(stdout) assert first_log["cold_start"] is True - assert second_log["cold_start"] is True - - # Second execution - assert third_log["cold_start"] is False - assert fourth_log["cold_start"] is False + assert second_log["cold_start"] is False -def test_package_logger(capsys): +def test_package_logger_stream(stdout): + # GIVEN package logger "aws_lambda_powertools" is explicitly set with no params + set_package_logger(stream=stdout) - set_package_logger() + # WHEN Tracer is initialized in disabled mode Tracer(disabled=True) - output = capsys.readouterr() - assert "Tracing has been disabled" in output.out + # THEN Tracer debug log statement should be logged + output = stdout.getvalue() + logger = logging.getLogger("aws_lambda_powertools") + assert "Tracing has been disabled" in output + assert logger.level == logging.DEBUG -def test_package_logger_format(stdout, capsys): - set_package_logger(stream=stdout, formatter=JsonFormatter(formatter="test")) +def test_package_logger_format(capsys): + # GIVEN package logger 
"aws_lambda_powertools" is explicitly + # with a custom formatter + formatter = logging.Formatter("message=%(message)s") + set_package_logger(formatter=formatter) + + # WHEN Tracer is initialized in disabled mode Tracer(disabled=True) - output = json.loads(stdout.getvalue().split("\n")[0]) - assert "test" in output["formatter"] + # THEN Tracer debug log statement should be logged using `message=` format + output = capsys.readouterr().out + logger = logging.getLogger("aws_lambda_powertools") + assert "message=" in output + assert logger.level == logging.DEBUG def test_logger_append_duplicated(stdout): + # GIVEN Logger is initialized with request_id field logger = Logger(stream=stdout, request_id="value") + + # WHEN `request_id` is appended to the existing structured log + # using a different value logger.structure_logs(append=True, request_id="new_value") logger.info("log") - log = json.loads(stdout.getvalue()) + + # THEN subsequent log statements should have the latest value + log = capture_logging_output(stdout) assert "new_value" == log["request_id"] def test_logger_invalid_sampling_rate(): + # GIVEN Logger is initialized + # WHEN sampling_rate non-numeric value + # THEN we should raise InvalidLoggerSamplingRateError with pytest.raises(InvalidLoggerSamplingRateError): Logger(sampling_rate="TEST") From 40fec56e1b7332b098129fa90519a7ad7a35de1d Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 15 Jun 2020 15:22:54 +0100 Subject: [PATCH 8/9] chore: cleanup tracer tests --- aws_lambda_powertools/tracing/tracer.py | 6 +- tests/unit/test_tracing.py | 109 +++++++++++++++++------- 2 files changed, 84 insertions(+), 31 deletions(-) diff --git a/aws_lambda_powertools/tracing/tracer.py b/aws_lambda_powertools/tracing/tracer.py index d31cbd61ebd..b97816f25b5 100644 --- a/aws_lambda_powertools/tracing/tracer.py +++ b/aws_lambda_powertools/tracing/tracer.py @@ -269,8 +269,10 @@ def decorate(event, context): function_name=lambda_handler_name, data=response, subsegment=subsegment ) except Exception as err: - logger.exception("Exception received from lambda handler") - self._add_full_exception_as_metadata(function_name=self.service, error=err, subsegment=subsegment) + logger.exception(f"Exception received from {lambda_handler_name}") + self._add_full_exception_as_metadata( + function_name=lambda_handler_name, error=err, subsegment=subsegment + ) raise return response diff --git a/tests/unit/test_tracing.py b/tests/unit/test_tracing.py index 65cb04c997b..d1e5408bb77 100644 --- a/tests/unit/test_tracing.py +++ b/tests/unit/test_tracing.py @@ -79,15 +79,20 @@ class In_subsegment(NamedTuple): def test_tracer_lambda_handler(mocker, dummy_response, provider_stub, in_subsegment_mock): + # GIVEN Tracer is initialized with booking as the service name provider = provider_stub(in_subsegment=in_subsegment_mock.in_subsegment) tracer = Tracer(provider=provider, service="booking") + # WHEN lambda_handler decorator is used @tracer.capture_lambda_handler def handler(event, context): return dummy_response handler({}, mocker.MagicMock()) + # THEN we should have a subsegment named handler + # annotate cold start, and add its response as trace metadata + # and use service name as a metadata namespace assert in_subsegment_mock.in_subsegment.call_count == 1 assert in_subsegment_mock.in_subsegment.call_args == mocker.call(name="## handler") assert in_subsegment_mock.put_metadata.call_args == mocker.call( @@ -98,19 +103,39 @@ def handler(event, context): def test_tracer_method(mocker, dummy_response, provider_stub, 
in_subsegment_mock): + # GIVEN Tracer is initialized with booking as the service name provider = provider_stub(in_subsegment=in_subsegment_mock.in_subsegment) - Tracer(provider=provider, service="booking") + tracer = Tracer(provider=provider, service="booking") + + # WHEN capture_method decorator is used + @tracer.capture_method + def greeting(name, message): + return dummy_response + + greeting(name="Foo", message="Bar") + + # THEN we should have a subsegment named after the method name + # and add its response as trace metadata + # and use service name as a metadata namespace + assert in_subsegment_mock.in_subsegment.call_count == 1 + assert in_subsegment_mock.in_subsegment.call_args == mocker.call(name="## greeting") + assert in_subsegment_mock.put_metadata.call_args == mocker.call( + key="greeting response", value=dummy_response, namespace="booking" + ) def test_tracer_custom_metadata(mocker, dummy_response, provider_stub): + # GIVEN Tracer is initialized with booking as the service name put_metadata_mock = mocker.MagicMock() - annotation_key = "Booking response" - annotation_value = {"bookingStatus": "CONFIRMED"} - provider = provider_stub(put_metadata_mock=put_metadata_mock) tracer = Tracer(provider=provider, service="booking") + + # WHEN put_metadata is used + annotation_key = "Booking response" + annotation_value = {"bookingStatus": "CONFIRMED"} tracer.put_metadata(annotation_key, annotation_value) + # THEN we should have metadata expected and booking as namespace assert put_metadata_mock.call_count == 1 assert put_metadata_mock.call_args_list[0] == mocker.call( key=annotation_key, value=annotation_value, namespace="booking" @@ -118,87 +143,97 @@ def test_tracer_custom_metadata(mocker, dummy_response, provider_stub): def test_tracer_custom_annotation(mocker, dummy_response, provider_stub): + # GIVEN Tracer is initialized put_annotation_mock = mocker.MagicMock() - annotation_key = "BookingId" - annotation_value = "123456" - provider = provider_stub(put_annotation_mock=put_annotation_mock) - tracer = Tracer(provider=provider, service="booking") + tracer = Tracer(provider=provider) + # WHEN put_metadata is used + annotation_key = "BookingId" + annotation_value = "123456" tracer.put_annotation(annotation_key, annotation_value) + # THEN we should have an annotation as expected assert put_annotation_mock.call_count == 1 assert put_annotation_mock.call_args == mocker.call(key=annotation_key, value=annotation_value) @mock.patch("aws_lambda_powertools.tracing.Tracer.patch") def test_tracer_autopatch(patch_mock): - # GIVEN tracer is instantiated - # WHEN default options were used, or patch() was called - # THEN tracer should patch all modules + # GIVEN tracer is initialized + # WHEN auto_patch hasn't been explicitly disabled Tracer(disabled=True) + + # THEN tracer should patch all modules assert patch_mock.call_count == 1 @mock.patch("aws_lambda_powertools.tracing.Tracer.patch") def test_tracer_no_autopatch(patch_mock): - # GIVEN tracer is instantiated + # GIVEN tracer is initialized # WHEN auto_patch is disabled - # THEN tracer should not patch any module Tracer(disabled=True, auto_patch=False) + + # THEN tracer should not patch any module assert patch_mock.call_count == 0 -def test_tracer_lambda_handler_empty_response_metadata(mocker, provider_stub): +def test_tracer_lambda_handler_does_not_add_empty_response_as_metadata(mocker, provider_stub): + # GIVEN tracer is initialized put_metadata_mock = mocker.MagicMock() provider = provider_stub(put_metadata_mock=put_metadata_mock) tracer = 
Tracer(provider=provider)
 
+    # WHEN capture_lambda_handler decorator is used
+    # and the handler response is empty
     @tracer.capture_lambda_handler
     def handler(event, context):
         return
 
     handler({}, mocker.MagicMock())
 
+    # THEN we should not add empty metadata
     assert put_metadata_mock.call_count == 0
 
 
-def test_tracer_method_empty_response_metadata(mocker, provider_stub):
+def test_tracer_method_does_not_add_empty_response_as_metadata(mocker, provider_stub):
+    # GIVEN tracer is initialized
     put_metadata_mock = mocker.MagicMock()
     provider = provider_stub(put_metadata_mock=put_metadata_mock)
     tracer = Tracer(provider=provider)
 
+    # WHEN capture_method decorator is used
+    # and the method response is empty
     @tracer.capture_method
     def greeting(name, message):
         return
 
     greeting(name="Foo", message="Bar")
 
+    # THEN we should not add empty metadata
     assert put_metadata_mock.call_count == 0
 
 
 @mock.patch("aws_lambda_powertools.tracing.tracer.aws_xray_sdk.core.patch")
-@mock.patch("aws_lambda_powertools.tracing.tracer.aws_xray_sdk.core.patch_all")
-def test_tracer_patch(xray_patch_all_mock, xray_patch_mock, mocker):
-    # GIVEN tracer is instantiated
-    # WHEN default X-Ray provider client is mocked
-    # THEN tracer should run just fine
-
-    Tracer()
-    assert xray_patch_all_mock.call_count == 1
-
+def test_tracer_patch_modules(xray_patch_mock, mocker):
+    # GIVEN tracer is initialized with a list of modules to patch
     modules = ["boto3"]
+
+    # WHEN modules are supported by X-Ray
     Tracer(service="booking", patch_modules=modules)
 
+    # THEN tracer should run just fine
     assert xray_patch_mock.call_count == 1
     assert xray_patch_mock.call_args == mocker.call(modules)
 
 
 def test_tracer_method_exception_metadata(mocker, provider_stub, in_subsegment_mock):
-
+    # GIVEN tracer is initialized
     provider = provider_stub(in_subsegment=in_subsegment_mock.in_subsegment)
     tracer = Tracer(provider=provider, service="booking")
 
+    # WHEN capture_method decorator is used
+    # and the method raises an exception
     @tracer.capture_method
     def greeting(name, message):
         raise ValueError("test")
@@ -206,16 +241,20 @@ def greeting(name, message):
     with pytest.raises(ValueError):
         greeting(name="Foo", message="Bar")
 
+    # THEN we should add the exception using method name as key plus error
+    # and the service name as the namespace
     put_metadata_mock_args = in_subsegment_mock.put_metadata.call_args[1]
     assert put_metadata_mock_args["key"] == "greeting error"
     assert put_metadata_mock_args["namespace"] == "booking"
 
 
 def test_tracer_lambda_handler_exception_metadata(mocker, provider_stub, in_subsegment_mock):
-
+    # GIVEN tracer is initialized
     provider = provider_stub(in_subsegment=in_subsegment_mock.in_subsegment)
     tracer = Tracer(provider=provider, service="booking")
 
+    # WHEN capture_lambda_handler decorator is used
+    # and the method raises an exception
     @tracer.capture_lambda_handler
     def handler(event, context):
         raise ValueError("test")
@@ -223,16 +262,21 @@ def handler(event, context):
     with pytest.raises(ValueError):
         handler({}, mocker.MagicMock())
 
+    # THEN we should add the exception using handler name as key plus error
+    # and the service name as the namespace
     put_metadata_mock_args = in_subsegment_mock.put_metadata.call_args[1]
-    assert put_metadata_mock_args["key"] == "booking error"
+    assert put_metadata_mock_args["key"] == "handler error"
+    assert put_metadata_mock_args["namespace"] == "booking"
 
 
 @pytest.mark.asyncio
 async def test_tracer_method_nested_async(mocker, dummy_response, provider_stub, in_subsegment_mock):
+    # GIVEN tracer is initialized
     provider = provider_stub(in_subsegment_async=in_subsegment_mock.in_subsegment)
     tracer = Tracer(provider=provider, service="booking")
 
+    # WHEN capture_method decorator is used for nested async methods
     @tracer.capture_method
     async def greeting_2(name, message):
         return dummy_response
@@ -250,6 +294,7 @@ async def greeting(name, message):
     ) = in_subsegment_mock.in_subsegment.call_args_list
     put_metadata_greeting2_call_args, put_metadata_greeting_call_args = in_subsegment_mock.put_metadata.call_args_list
 
+    # THEN we should add metadata for each response like we would for a sync decorated method
     assert in_subsegment_mock.in_subsegment.call_count == 2
     assert in_subsegment_greeting_call_args == mocker.call(name="## greeting")
     assert in_subsegment_greeting2_call_args == mocker.call(name="## greeting_2")
@@ -265,9 +310,10 @@ async def greeting(name, message):
 
 @pytest.mark.asyncio
 async def test_tracer_method_nested_async_disabled(dummy_response):
-
+    # GIVEN tracer is initialized and explicitly disabled
     tracer = Tracer(service="booking", disabled=True)
 
+    # WHEN capture_method decorator is used
     @tracer.capture_method
     async def greeting_2(name, message):
         return dummy_response
@@ -277,16 +323,19 @@ async def greeting(name, message):
         await greeting_2(name, message)
         return dummy_response
 
+    # THEN we should run the decorator methods without side effects
     ret = await greeting(name="Foo", message="Bar")
-
     assert ret == dummy_response
 
 
 @pytest.mark.asyncio
 async def test_tracer_method_exception_metadata_async(mocker, provider_stub, in_subsegment_mock):
+    # GIVEN tracer is initialized
     provider = provider_stub(in_subsegment_async=in_subsegment_mock.in_subsegment)
     tracer = Tracer(provider=provider, service="booking")
 
+    # WHEN capture_method decorator is used in an async method
+    # and the method raises an exception
     @tracer.capture_method
     async def greeting(name, message):
         raise ValueError("test")
@@ -294,6 +343,8 @@ async def greeting(name, message):
     with pytest.raises(ValueError):
         await greeting(name="Foo", message="Bar")
 
+    # THEN we should add the exception using method name as key plus error
+    # and the service name as the namespace
     put_metadata_mock_args = in_subsegment_mock.put_metadata.call_args[1]
     assert put_metadata_mock_args["key"] == "greeting error"
     assert put_metadata_mock_args["namespace"] == "booking"

From c94b6b96f51f1af8b8bfb1c4610799b7c296d9ed Mon Sep 17 00:00:00 2001
From: heitorlessa
Date: Mon, 15 Jun 2020 15:29:11 +0100
Subject: [PATCH 9/9] chore: set test coverage to 90% min

Signed-off-by: heitorlessa
---
 pyproject.toml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pyproject.toml b/pyproject.toml
index 59d5b2bbbc0..c147ab1c9a7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -58,6 +58,7 @@ directory = "test_report"
 title = "Lambda Powertools Test Coverage"
 
 [tool.coverage.report]
+fail_under = 90
 exclude_lines = [
     # Have to re-enable the standard pragma
     "pragma: no cover",