From 6618bf3a278b54a870e74bdac52b0d0738e9aa37 Mon Sep 17 00:00:00 2001
From: Tom McCarthy
Date: Mon, 8 Jun 2020 16:40:40 +0200
Subject: [PATCH 1/8] feat: dont throw exception by default from log_metrics
 if no metrics are emitted

---
 aws_lambda_powertools/metrics/metrics.py | 22 +++++++++++++++++-----
 tests/functional/test_metrics.py         | 24 ++++++++++++++++++++----
 2 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py
index 2c095782e73..d17f7772af3 100644
--- a/aws_lambda_powertools/metrics/metrics.py
+++ b/aws_lambda_powertools/metrics/metrics.py
@@ -82,7 +82,12 @@ def clear_metrics(self):
         self.metric_set.clear()
         self.dimension_set.clear()
 
-    def log_metrics(self, lambda_handler: Callable[[Any, Any], Any] = None, capture_cold_start_metric: bool = False):
+    def log_metrics(
+        self,
+        lambda_handler: Callable[[Any, Any], Any] = None,
+        capture_cold_start_metric: bool = False,
+        raise_for_empty_metrics: bool = False,
+    ):
         """Decorator to serialize and publish metrics at the end of a function execution.
 
         Be aware that the log_metrics **does call* the decorated function (e.g. lambda_handler).
@@ -102,6 +107,10 @@ def handler(event, context)
         ----------
         lambda_handler : Callable[[Any, Any], Any], optional
             Lambda function handler, by default None
+        capture_cold_start_metric : bool, optional
+            Captures cold start metric, by default False
+        raise_for_empty_metrics : bool, optional
+            Raise exception if no metrics are emitted, by default False
 
         Raises
         ------
@@ -122,10 +131,13 @@ def decorate(event, context):
                 if capture_cold_start_metric:
                     self.__add_cold_start_metric(context=context)
             finally:
-                metrics = self.serialize_metric_set()
-                self.clear_metrics()
-                logger.debug("Publishing metrics", {"metrics": metrics})
-                print(json.dumps(metrics))
+                if not raise_for_empty_metrics and not self.metric_set:
+                    logger.debug("No metrics to publish, skipping")
+                else:
+                    metrics = self.serialize_metric_set()
+                    self.clear_metrics()
+                    logger.debug("Publishing metrics", {"metrics": metrics})
+                    print(json.dumps(metrics))
 
             return response
 
diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py
index 7f3a57c9538..75dac615df1 100644
--- a/tests/functional/test_metrics.py
+++ b/tests/functional/test_metrics.py
@@ -351,14 +351,14 @@ def test_log_no_metrics_error_propagation(capsys, metric, dimension, namespace):
     # GIVEN Metrics is initialized
     my_metrics = Metrics()
 
-    @my_metrics.log_metrics
+    @my_metrics.log_metrics(raise_for_empty_metrics=True)
     def lambda_handler(evt, context):
-        # WHEN log_metrics is used despite having no metrics
+        # WHEN log_metrics is used with raise_for_empty_metrics param and has no metrics
         # and the function decorated also raised an exception
         raise ValueError("Bubble up")
 
-    # THEN we should first raise SchemaValidationError as the main exception
-    with pytest.raises(SchemaValidationError):
+    # THEN the raised exception should be
+    with pytest.raises(ValueError):
         lambda_handler({}, {})
 
 
@@ -633,3 +633,19 @@ def lambda_handler(evt, context):
 
     assert "ColdStart" not in output
     assert "function_name" not in output
+
+
+def test_log_metrics_decorator_no_metrics(capsys, dimensions, namespace):
+    # GIVEN Metrics is initialized
+    my_metrics = Metrics(namespace=namespace["name"], service="test_service")
+
+    # WHEN using the log_metrics decorator and no metrics have been added
+    @my_metrics.log_metrics
+    def lambda_handler(evt, context):
+        pass
+
+    # THEN it should not throw an exception, and should not log anything
+    LambdaContext = namedtuple("LambdaContext", "function_name")
+    lambda_handler({}, LambdaContext("example_fn"))
+
+    assert capsys.readouterr().out == ""
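The net effect of PATCH 1: an empty metric set is now skipped by default, and the old fail-fast behaviour becomes opt-in. A minimal sketch of the two modes, assuming the `Metrics` constructor arguments shown in the new test (handler names are illustrative):

```python
from aws_lambda_powertools.metrics import Metrics

metrics = Metrics(namespace="ExampleApp", service="example_service")

@metrics.log_metrics  # default: an empty metric set is logged at DEBUG and skipped
def handler_without_flag(event, context):
    return "ok"

@metrics.log_metrics(raise_for_empty_metrics=True)  # opt-in: keep the old failure mode
def handler_with_flag(event, context):
    return "ok"

handler_without_flag({}, None)   # completes; nothing is printed to stdout
try:
    handler_with_flag({}, None)  # serialize_metric_set() runs on an empty set
except Exception as exc:         # and raises SchemaValidationError
    print(f"raised as requested: {exc}")
```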
From 42bb1a0c22c70227984f44299fc97eb62268d60e Mon Sep 17 00:00:00 2001
From: Tom McCarthy
Date: Mon, 8 Jun 2020 16:57:02 +0200
Subject: [PATCH 2/8] docs: update details for change to error handling

---
 docs/content/core/metrics.mdx | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/docs/content/core/metrics.mdx b/docs/content/core/metrics.mdx
index bd1de65f88c..3c8a13b2e50 100644
--- a/docs/content/core/metrics.mdx
+++ b/docs/content/core/metrics.mdx
@@ -103,15 +103,17 @@ def lambda_handler(evt, ctx):
     ...
 ```
 
-`log_metrics` decorator **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if any of the following criteria is met, `SchemaValidationError` exception will be raised:
+`log_metrics` decorator **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if no metrics are provided then no exception will be raised. If metrics are provided, and any of the following criteria are not met, `SchemaValidationError` exception will be raised:
 
-* At least of one Metric and Dimension
+* Minimum of 1 dimension
 * Maximum of 9 dimensions
 * Namespace is set, and no more than one
 * Metric units must be [supported by CloudWatch](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html)
 
+If you want to ensure that at least 1 metric is emitted, you can pass `raise_for_empty_metrics` to the `log_metrics` decorator: `@metrics.log_metrics(raise_for_empty_metrics=True)`
+
 <Note type="warning">
-  When nesting multiple middlwares, you should use log_metrics as your last decorator wrapping all subsequent ones.
+  When nesting multiple middlewares, you should use log_metrics as your last decorator wrapping all subsequent ones.
 </Note>
 
 ```python:title=lambda_handler_nested_middlewares.py
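To make the revised wording concrete: once at least one metric exists, every criterion listed above is enforced at flush time. A sketch of that failure path (the metric name and the missing-namespace setup are illustrative):

```python
from aws_lambda_powertools.metrics import Metrics, MetricUnit

metrics = Metrics()  # note: no namespace set via constructor or environment

@metrics.log_metrics
def lambda_handler(event, context):
    # A metric is present, so validation runs on flush...
    metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)

# ...and lambda_handler({}, None) raises SchemaValidationError,
# because the "Namespace is set" criterion above is not met.
```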
From 5e728f7940b1ed57a4c59f8ed969e0e6b4e680ad Mon Sep 17 00:00:00 2001
From: Tom McCarthy
Date: Mon, 8 Jun 2020 21:03:51 +0200
Subject: [PATCH 3/8] chore: rename parameter for clarity

---
 aws_lambda_powertools/metrics/metrics.py | 6 +++---
 docs/content/core/metrics.mdx            | 2 +-
 tests/functional/test_metrics.py         | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py
index d17f7772af3..1e59ddf58e5 100644
--- a/aws_lambda_powertools/metrics/metrics.py
+++ b/aws_lambda_powertools/metrics/metrics.py
@@ -86,7 +86,7 @@ def log_metrics(
         self,
         lambda_handler: Callable[[Any, Any], Any] = None,
         capture_cold_start_metric: bool = False,
-        raise_for_empty_metrics: bool = False,
+        raise_on_empty_metrics: bool = False,
     ):
         """Decorator to serialize and publish metrics at the end of a function execution.
 
@@ -109,7 +109,7 @@ def handler(event, context)
             Lambda function handler, by default None
         capture_cold_start_metric : bool, optional
             Captures cold start metric, by default False
-        raise_for_empty_metrics : bool, optional
+        raise_on_empty_metrics : bool, optional
             Raise exception if no metrics are emitted, by default False
 
         Raises
         ------
@@ -131,7 +131,7 @@ def decorate(event, context):
                 if capture_cold_start_metric:
                     self.__add_cold_start_metric(context=context)
             finally:
-                if not raise_for_empty_metrics and not self.metric_set:
+                if not raise_on_empty_metrics and not self.metric_set:
                     logger.debug("No metrics to publish, skipping")
                 else:
                     metrics = self.serialize_metric_set()
diff --git a/docs/content/core/metrics.mdx b/docs/content/core/metrics.mdx
index 3c8a13b2e50..f61468edc4b 100644
--- a/docs/content/core/metrics.mdx
+++ b/docs/content/core/metrics.mdx
@@ -110,7 +110,7 @@ def lambda_handler(evt, ctx):
 * Namespace is set, and no more than one
 * Metric units must be [supported by CloudWatch](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html)
 
-If you want to ensure that at least 1 metric is emitted, you can pass `raise_for_empty_metrics` to the `log_metrics` decorator: `@metrics.log_metrics(raise_for_empty_metrics=True)`
+If you want to ensure that at least 1 metric is emitted, you can pass `raise_on_empty_metrics` to the `log_metrics` decorator: `@metrics.log_metrics(raise_on_empty_metrics=True)`
 
 <Note type="warning">
   When nesting multiple middlewares, you should use log_metrics as your last decorator wrapping all subsequent ones.
diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py
index 75dac615df1..2b1ea3db2d0 100644
--- a/tests/functional/test_metrics.py
+++ b/tests/functional/test_metrics.py
@@ -351,9 +351,9 @@ def test_log_no_metrics_error_propagation(capsys, metric, dimension, namespace):
     # GIVEN Metrics is initialized
     my_metrics = Metrics()
 
-    @my_metrics.log_metrics(raise_for_empty_metrics=True)
+    @my_metrics.log_metrics(raise_on_empty_metrics=True)
     def lambda_handler(evt, context):
-        # WHEN log_metrics is used with raise_for_empty_metrics param and has no metrics
+        # WHEN log_metrics is used with raise_on_empty_metrics param and has no metrics
         # and the function decorated also raised an exception
         raise ValueError("Bubble up")

From e204aa52bf166bf2599c1c23bf6de9a8279014bb Mon Sep 17 00:00:00 2001
From: Tom McCarthy
Date: Mon, 8 Jun 2020 21:15:32 +0200
Subject: [PATCH 4/8] fix: correct bug in exception handling from previous
 commits

add raise_on_empty_metrics param to partial func call
revert mistake in change to test for exception propagation
---
 aws_lambda_powertools/metrics/metrics.py | 6 +++++-
 tests/functional/test_metrics.py         | 2 +-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py
index 1e59ddf58e5..468bc9ab54f 100644
--- a/aws_lambda_powertools/metrics/metrics.py
+++ b/aws_lambda_powertools/metrics/metrics.py
@@ -122,7 +122,11 @@ def handler(event, context)
         # Return a partial function with args filled
         if lambda_handler is None:
             logger.debug("Decorator called with parameters")
-            return functools.partial(self.log_metrics, capture_cold_start_metric=capture_cold_start_metric)
+            return functools.partial(
+                self.log_metrics,
+                capture_cold_start_metric=capture_cold_start_metric,
+                raise_on_empty_metrics=raise_on_empty_metrics,
+            )
 
         @functools.wraps(lambda_handler)
         def decorate(event, context):
diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py
index 2b1ea3db2d0..71a3a31424c 100644
--- a/tests/functional/test_metrics.py
+++ b/tests/functional/test_metrics.py
@@ -358,7 +358,7 @@ def lambda_handler(evt, context):
         raise ValueError("Bubble up")
 
     # THEN the raised exception should be
-    with pytest.raises(ValueError):
+    with pytest.raises(SchemaValidationError):
         lambda_handler({}, {})
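The bug this patch fixes is inherent to the decorator-with-optional-arguments pattern: when invoked as `@metrics.log_metrics(...)`, the method receives no handler and must return a partial of itself binding every option, otherwise the second pass silently falls back to defaults. A generic, self-contained sketch of the pattern (not powertools code):

```python
import functools

def log_calls(func=None, prefix="", raise_on_error=False):
    if func is None:
        # Called as @log_calls(prefix=...): re-enter with ALL options bound.
        # Forgetting one here, which is the bug fixed above, silently resets
        # it to its default on the second pass.
        return functools.partial(log_calls, prefix=prefix, raise_on_error=raise_on_error)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print(f"{prefix}calling {func.__name__}")
        try:
            return func(*args, **kwargs)
        except Exception:
            if raise_on_error:
                raise
            return None

    return wrapper

@log_calls(prefix=">> ", raise_on_error=True)
def divide(a, b):
    return a / b

print(divide(4, 2))  # prints ">> calling divide", then 2.0
```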
From 0195d38d87b231bbad3d842c8fea9bae12f5e773 Mon Sep 17 00:00:00 2001
From: Tom McCarthy
Date: Tue, 9 Jun 2020 11:44:35 +0200
Subject: [PATCH 5/8] improv: change log debug statement to warning when no
 metrics are present

---
 aws_lambda_powertools/metrics/metrics.py |  3 ++-
 tests/functional/test_metrics.py         | 13 +++++++------
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py
index 468bc9ab54f..d481f5396db 100644
--- a/aws_lambda_powertools/metrics/metrics.py
+++ b/aws_lambda_powertools/metrics/metrics.py
@@ -2,6 +2,7 @@
 import json
 import logging
 import os
+import warnings
 from typing import Any, Callable
 
 from aws_lambda_powertools.metrics.base import MetricManager
@@ -136,7 +137,7 @@ def decorate(event, context):
                     self.__add_cold_start_metric(context=context)
             finally:
                 if not raise_on_empty_metrics and not self.metric_set:
-                    logger.debug("No metrics to publish, skipping")
+                    warnings.warn("No metrics to publish, skipping")
                 else:
                     metrics = self.serialize_metric_set()
                     self.clear_metrics()
diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py
index 71a3a31424c..c19ee2b5abd 100644
--- a/tests/functional/test_metrics.py
+++ b/tests/functional/test_metrics.py
@@ -1,4 +1,5 @@
 import json
+import warnings
 from collections import namedtuple
 from typing import Any, Dict, List
 
@@ -635,7 +636,7 @@ def lambda_handler(evt, context):
     assert "ColdStart" not in output
     assert "function_name" not in output
 
 
-def test_log_metrics_decorator_no_metrics(capsys, dimensions, namespace):
+def test_log_metrics_decorator_no_metrics(dimensions, namespace):
     # GIVEN Metrics is initialized
     my_metrics = Metrics(namespace=namespace["name"], service="test_service")
 
@@ -644,8 +645,8 @@ def test_log_metrics_decorator_no_metrics(capsys, dimensions, namespace):
     def lambda_handler(evt, context):
         pass
 
-    # THEN it should not throw an exception, and should not log anything
-    LambdaContext = namedtuple("LambdaContext", "function_name")
-    lambda_handler({}, LambdaContext("example_fn"))
-
-    assert capsys.readouterr().out == ""
+    # THEN it should raise a warning instead of throwing an exception
+    with warnings.catch_warnings(record=True) as w:
+        lambda_handler({}, {})
+        assert len(w) == 1
+        assert str(w[-1].message) == "No metrics to publish, skipping"

From 9be19497269e4fa3a80102ac82d9015d4d8067ea Mon Sep 17 00:00:00 2001
From: Tom McCarthy
Date: Tue, 9 Jun 2020 11:57:29 +0200
Subject: [PATCH 6/8] docs: add note on suppressing warning for empty metric
 set

---
 docs/content/core/metrics.mdx | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/content/core/metrics.mdx b/docs/content/core/metrics.mdx
index f61468edc4b..e18fbef2baa 100644
--- a/docs/content/core/metrics.mdx
+++ b/docs/content/core/metrics.mdx
@@ -103,14 +103,14 @@ def lambda_handler(evt, ctx):
     ...
 ```
 
-`log_metrics` decorator **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if no metrics are provided then no exception will be raised. If metrics are provided, and any of the following criteria are not met, `SchemaValidationError` exception will be raised:
+`log_metrics` decorator **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if no metrics are provided then a warning will be logged, but no exception will be raised. If metrics are provided, and any of the following criteria are not met, `SchemaValidationError` exception will be raised:
 
 * Minimum of 1 dimension
 * Maximum of 9 dimensions
 * Namespace is set, and no more than one
 * Metric units must be [supported by CloudWatch](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html)
 
-If you want to ensure that at least 1 metric is emitted, you can pass `raise_on_empty_metrics` to the `log_metrics` decorator: `@metrics.log_metrics(raise_on_empty_metrics=True)`
+If you want to ensure that at least 1 metric is emitted, you can pass `raise_on_empty_metrics` to the `log_metrics` decorator: `@metrics.log_metrics(raise_on_empty_metrics=True)`. If you expect your function to execute without publishing metrics every time, you can suppress the warning with `warnings.filterwarnings("ignore", "No metrics to publish*")`.
 
 <Note type="warning">
   When nesting multiple middlewares, you should use log_metrics as your last decorator wrapping all subsequent ones.
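After patches 5 and 6, the empty-set path surfaces as a standard Python warning, so callers can silence it or assert on it with the stdlib `warnings` module alone. A runnable sketch, using a bare `warnings.warn` as a stand-in for the decorated handler call:

```python
import warnings

# Silence it globally, e.g. for functions that legitimately publish
# no metrics on some invocations (the pattern quoted in the docs above):
warnings.filterwarnings("ignore", "No metrics to publish*")

# Or capture it in a test, mirroring test_log_metrics_decorator_no_metrics:
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")  # catch_warnings resets filters inside the block
    warnings.warn("No metrics to publish, skipping")  # stand-in for the handler call
assert str(caught[-1].message) == "No metrics to publish, skipping"
```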
From 66b76e0d9a55e03a8057ba937a40393eee52e5b8 Mon Sep 17 00:00:00 2001
From: heitorlessa
Date: Tue, 9 Jun 2020 15:34:24 +0100
Subject: [PATCH 7/8] improv: add warning for manual serialization

Signed-off-by: heitorlessa
---
 docs/content/core/metrics.mdx | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/docs/content/core/metrics.mdx b/docs/content/core/metrics.mdx
index e18fbef2baa..c7dc83be42e 100644
--- a/docs/content/core/metrics.mdx
+++ b/docs/content/core/metrics.mdx
@@ -135,6 +135,10 @@ def lambda_handler(evt, ctx):
 
 If you prefer not to use `log_metrics` because you might want to encapsulate additional logic when doing so, you can manually flush and clear metrics as follows:
 
+<Note type="warning">
+  Metrics, dimensions and namespace validation still applies.
+</Note>
+
 ```python:title=manual_metric_serialization.py
 import json
 from aws_lambda_powertools.metrics import Metrics, MetricUnit
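The manual path this new warning refers to mirrors the `else` branch added in PATCH 1 (serialize, clear, print). A condensed sketch, assuming the same `Metrics` API shown in the diffs above:

```python
import json
from aws_lambda_powertools.metrics import Metrics, MetricUnit

metrics = Metrics(namespace="ExampleApp", service="example_service")

def lambda_handler(event, context):
    metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
    # Validation happens at serialization time, exactly as the note warns:
    # SchemaValidationError is still raised for an invalid metric set.
    emf_payload = metrics.serialize_metric_set()
    metrics.clear_metrics()
    print(json.dumps(emf_payload))  # CloudWatch reads the EMF document from stdout
    return {"statusCode": 200}
```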
From a78dc10020ce50f0e056dade827cb327c1ecc89a Mon Sep 17 00:00:00 2001
From: heitorlessa
Date: Tue, 9 Jun 2020 15:35:30 +0100
Subject: [PATCH 8/8] improv: whitespace and warning supress as info

Signed-off-by: heitorlessa
---
 docs/content/core/metrics.mdx | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/docs/content/core/metrics.mdx b/docs/content/core/metrics.mdx
index c7dc83be42e..23292295f68 100644
--- a/docs/content/core/metrics.mdx
+++ b/docs/content/core/metrics.mdx
@@ -103,14 +103,28 @@ def lambda_handler(evt, ctx):
     ...
 ```
 
-`log_metrics` decorator **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if no metrics are provided then a warning will be logged, but no exception will be raised. If metrics are provided, and any of the following criteria are not met, `SchemaValidationError` exception will be raised:
+`log_metrics` decorator **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if no metrics are provided then a warning will be logged, but no exception will be raised.
+
+If metrics are provided, and any of the following criteria are not met, `SchemaValidationError` exception will be raised:
 
 * Minimum of 1 dimension
 * Maximum of 9 dimensions
 * Namespace is set, and no more than one
 * Metric units must be [supported by CloudWatch](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html)
 
-If you want to ensure that at least 1 metric is emitted, you can pass `raise_on_empty_metrics` to the `log_metrics` decorator: `@metrics.log_metrics(raise_on_empty_metrics=True)`. If you expect your function to execute without publishing metrics every time, you can suppress the warning with `warnings.filterwarnings("ignore", "No metrics to publish*")`.
+If you want to ensure that at least one metric is emitted, you can pass `raise_on_empty_metrics` to the **log_metrics** decorator:
+
+```python:title=lambda_handler.py
+from aws_lambda_powertools.metrics import Metrics
+
+@metrics.log_metrics(raise_on_empty_metrics=True) # highlight-line
+def lambda_handler(evt, ctx):
+    ...
+```
+
+<Note type="info">
+  If you expect your function to execute without publishing metrics every time, you can suppress the warning with warnings.filterwarnings("ignore", "No metrics to publish*").
+</Note>
 
 <Note type="warning">
   When nesting multiple middlewares, you should use log_metrics as your last decorator wrapping all subsequent ones.
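Taken together, the series leaves `log_metrics` with three outcomes: metrics present (validate, serialize, print), empty set (a `UserWarning`), and empty set with `raise_on_empty_metrics=True` (`SchemaValidationError`). A closing sketch of the warning case, assuming the post-series behaviour:

```python
import warnings
from aws_lambda_powertools.metrics import Metrics

metrics = Metrics(namespace="ExampleApp", service="example_service")

@metrics.log_metrics
def quiet_handler(event, context):
    pass  # this code path adds no metrics

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    quiet_handler({}, None)

assert any("No metrics to publish" in str(w.message) for w in caught)
```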