diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py
index b96356192ab..59daafa0bb1 100644
--- a/aws_lambda_powertools/metrics/base.py
+++ b/aws_lambda_powertools/metrics/base.py
@@ -328,6 +328,28 @@ def clear_metrics(self) -> None:
         self.dimension_set.clear()
         self.metadata_set.clear()
 
+    def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
+        """Manually flushes the metrics. This is normally not necessary,
+        unless you're running on a runtime other than Lambda, since on Lambda
+        the @log_metrics decorator already handles flushing for you.
+
+        Parameters
+        ----------
+        raise_on_empty_metrics : bool, optional
+            raise exception if no metrics are emitted, by default False
+        """
+        if not raise_on_empty_metrics and not self.metric_set:
+            warnings.warn(
+                "No application metrics to publish. The cold-start metric may be published if enabled. "
+                "If application metrics should never be empty, consider using 'raise_on_empty_metrics'",
+                stacklevel=2,
+            )
+        else:
+            logger.debug("Flushing existing metrics")
+            metrics = self.serialize_metric_set()
+            print(json.dumps(metrics, separators=(",", ":")))
+            self.clear_metrics()
+
     def log_metrics(
         self,
         lambda_handler: Union[Callable[[Dict, Any], Any], Optional[Callable[[Dict, Any, Optional[Dict]], Any]]] = None,
@@ -390,16 +412,7 @@ def decorate(event, context):
                 if capture_cold_start_metric:
                     self._add_cold_start_metric(context=context)
             finally:
-                if not raise_on_empty_metrics and not self.metric_set:
-                    warnings.warn(
-                        "No application metrics to publish. The cold-start metric may be published if enabled. "
-                        "If application metrics should never be empty, consider using 'raise_on_empty_metrics'",
-                        stacklevel=2,
-                    )
-                else:
-                    metrics = self.serialize_metric_set()
-                    self.clear_metrics()
-                    print(json.dumps(metrics, separators=(",", ":")))
+                self.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics)
 
             return response
 
diff --git a/docs/core/metrics.md b/docs/core/metrics.md
index 81acd8999d8..ba9f746e867 100644
--- a/docs/core/metrics.md
+++ b/docs/core/metrics.md
@@ -251,13 +251,15 @@ By default it will skip all previously defined dimensions including default dime
 
 ### Flushing metrics manually
 
-If you prefer not to use `log_metrics` because you might want to encapsulate additional logic when doing so, you can manually flush and clear metrics as follows:
+If you are using the AWS Lambda Web Adapter project, or a middleware with custom metric logic, you can use `flush_metrics()`. This method will serialize the metrics available, print them to standard output, and clear in-memory metrics data.
 
 ???+ warning
-    Metrics, dimensions and namespace validation still applies
+    This does not capture the cold-start metric, and metric data validation still applies.
 
-```python hl_lines="11-14" title="Manually flushing and clearing metrics from memory"
---8<-- "examples/metrics/src/single_metric.py"
+Unlike the `log_metrics` decorator, you are also responsible for flushing metrics in the event of an exception.
+
+```python hl_lines="18" title="Manually flushing and clearing metrics from memory"
+--8<-- "examples/metrics/src/flush_metrics.py"
 ```
 
 ### Metrics isolation
diff --git a/examples/metrics/src/manual_flush.py b/examples/metrics/src/flush_metrics.py
similarity index 62%
rename from examples/metrics/src/manual_flush.py
rename to examples/metrics/src/flush_metrics.py
index def0f845d08..a66ce07cbf7 100644
--- a/examples/metrics/src/manual_flush.py
+++ b/examples/metrics/src/flush_metrics.py
@@ -1,5 +1,3 @@
-import json
-
 from aws_lambda_powertools import Metrics
 from aws_lambda_powertools.metrics import MetricUnit
 from aws_lambda_powertools.utilities.typing import LambdaContext
@@ -7,8 +5,14 @@
 metrics = Metrics()
 
 
-def lambda_handler(event: dict, context: LambdaContext):
+def book_flight(flight_id: str, **kwargs):
+    # logic to book flight
+    ...
     metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
-    your_metrics_object = metrics.serialize_metric_set()
-    metrics.clear_metrics()
-    print(json.dumps(your_metrics_object))
+
+
+def lambda_handler(event: dict, context: LambdaContext):
+    try:
+        book_flight(flight_id=event.get("flight_id", ""))
+    finally:
+        metrics.flush_metrics()
diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py
index c0c41f3bf88..964af99ce6e 100644
--- a/tests/functional/test_metrics.py
+++ b/tests/functional/test_metrics.py
@@ -249,6 +249,26 @@ def lambda_handler(evt, ctx):
     assert expected == output
 
 
+def test_log_metrics_manual_flush(capsys, metrics, dimensions, namespace):
+    # GIVEN Metrics is initialized
+    my_metrics = Metrics(namespace=namespace)
+    for metric in metrics:
+        my_metrics.add_metric(**metric)
+    for dimension in dimensions:
+        my_metrics.add_dimension(**dimension)
+
+    # WHEN we manually flush the metrics
+    my_metrics.flush_metrics()
+
+    output = capture_metrics_output(capsys)
+    expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace)
+
+    # THEN we should have no exceptions
+    # and a valid EMF object should be flushed correctly
+    remove_timestamp(metrics=[output, expected])
+    assert expected == output
+
+
 def test_namespace_env_var(monkeypatch, capsys, metric, dimension, namespace):
     # GIVEN POWERTOOLS_METRICS_NAMESPACE is set
     monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace)
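
As additional context for the change above, here is a minimal usage sketch (not part of the diff) of calling `flush_metrics()` outside the `@log_metrics` decorator, for example when serving requests behind the AWS Lambda Web Adapter. It only relies on the public `Metrics` API and the new `flush_metrics()` signature introduced in this PR; the namespace, service, and handler names are placeholders for illustration.

```python
# Minimal sketch: manual flushing outside @log_metrics.
# "Bookings", "booking-service", and handle_request are illustrative placeholders.
from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import MetricUnit

metrics = Metrics(namespace="Bookings", service="booking-service")


def handle_request() -> None:
    try:
        # record application metrics as part of normal request handling
        metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
    finally:
        # serialize the metric set, print the EMF blob to stdout, and clear memory,
        # even if the request handling above raised an exception
        metrics.flush_metrics(raise_on_empty_metrics=False)
```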