diff --git a/aws_lambda_powertools/metrics/functions.py b/aws_lambda_powertools/metrics/functions.py
index 14c68e88275..75bf0855e18 100644
--- a/aws_lambda_powertools/metrics/functions.py
+++ b/aws_lambda_powertools/metrics/functions.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import os
from datetime import datetime
from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import (
@@ -8,6 +9,7 @@
)
from aws_lambda_powertools.metrics.provider.cloudwatch_emf.metric_properties import MetricResolution, MetricUnit
from aws_lambda_powertools.shared import constants
+from aws_lambda_powertools.shared.functions import strtobool
def extract_cloudwatch_metric_resolution_value(metric_resolutions: list, resolution: int | MetricResolution) -> int:
@@ -134,3 +136,28 @@ def convert_timestamp_to_emf_format(timestamp: int | datetime) -> int:
# Returning zero represents the initial date of epoch time,
# which will be skipped by Amazon CloudWatch.
return 0
+
+
+def is_metrics_disabled() -> bool:
+ """
+ Determine if metrics should be disabled based on environment variables.
+
+ Returns:
+ bool: True if metrics are disabled, False otherwise.
+
+    Rules:
+    - If POWERTOOLS_METRICS_DISABLED is true: disable metrics
+    - If POWERTOOLS_DEV is true and POWERTOOLS_METRICS_DISABLED is not set: disable metrics
+    - An explicit POWERTOOLS_METRICS_DISABLED=false takes precedence over POWERTOOLS_DEV
+    """
+
+    is_dev_mode = strtobool(os.getenv(constants.POWERTOOLS_DEV_ENV, "false"))
+    is_disabled = strtobool(os.getenv(constants.METRICS_DISABLED_ENV, "false"))
+
+    # Explicit opt-out always wins; dev mode only applies when the variable is unset
+    disable_conditions = [
+        is_disabled,
+        is_dev_mode and os.getenv(constants.METRICS_DISABLED_ENV) is None,
+    ]
+
+    return any(disable_conditions)
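
For reference, a minimal sketch of the precedence these rules produce, calling `is_metrics_disabled` directly under each environment combination (a hypothetical standalone check, not part of the patch; `None` means the variable is unset):

```python
import os

from aws_lambda_powertools.metrics.functions import is_metrics_disabled

# (POWERTOOLS_DEV, POWERTOOLS_METRICS_DISABLED) -> expected result
cases = [
    (None, None, False),       # defaults: metrics stay enabled
    (None, "true", True),      # explicit opt-out disables metrics
    ("true", None, True),      # dev mode disables metrics when unset
    ("true", "false", False),  # explicit "false" overrides dev mode
]

for dev, disabled, expected in cases:
    os.environ.pop("POWERTOOLS_DEV", None)
    os.environ.pop("POWERTOOLS_METRICS_DISABLED", None)
    if dev is not None:
        os.environ["POWERTOOLS_DEV"] = dev
    if disabled is not None:
        os.environ["POWERTOOLS_METRICS_DISABLED"] = disabled
    assert is_metrics_disabled() is expected
```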
diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py
index 0bcf54917ed..a6493f25516 100644
--- a/aws_lambda_powertools/metrics/metrics.py
+++ b/aws_lambda_powertools/metrics/metrics.py
@@ -47,6 +47,8 @@ def lambda_handler():
metric namespace
POWERTOOLS_SERVICE_NAME : str
service name used for default dimension
+POWERTOOLS_METRICS_DISABLED : bool
+    disables Powertools metrics emission (e.g. `"true", "True", "TRUE"`)
Parameters
----------
diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py
index 7a7db86c9c6..65c5b619f57 100644
--- a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py
+++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py
@@ -15,6 +15,7 @@
convert_timestamp_to_emf_format,
extract_cloudwatch_metric_resolution_value,
extract_cloudwatch_metric_unit_value,
+ is_metrics_disabled,
validate_emf_timestamp,
)
from aws_lambda_powertools.metrics.provider.base import BaseProvider
@@ -77,6 +78,7 @@ def __init__(
self.default_dimensions = default_dimensions or {}
self.namespace = resolve_env_var_choice(choice=namespace, env=os.getenv(constants.METRICS_NAMESPACE_ENV))
self.service = resolve_env_var_choice(choice=service, env=os.getenv(constants.SERVICE_NAME_ENV))
+
self.metadata_set = metadata_set if metadata_set is not None else {}
self.timestamp: int | None = None
@@ -127,6 +129,7 @@ def add_metric(
MetricResolutionError
When metric resolution is not supported by CloudWatch
"""
+
if not isinstance(value, numbers.Number):
raise MetricValueError(f"{value} is not a valid number")
@@ -268,6 +271,7 @@ def add_dimension(self, name: str, value: str) -> None:
value : str
Dimension value
"""
+
logger.debug(f"Adding dimension: {name}:{value}")
if len(self.dimension_set) == MAX_DIMENSIONS:
raise SchemaValidationError(
@@ -374,7 +378,7 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
"If application metrics should never be empty, consider using 'raise_on_empty_metrics'",
stacklevel=2,
)
- else:
+ elif not is_metrics_disabled():
logger.debug("Flushing existing metrics")
metrics = self.serialize_metric_set()
print(json.dumps(metrics, separators=(",", ":")))
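
When metrics are enabled, the `elif` branch above serializes the metric set and prints it as one compact EMF JSON blob, which CloudWatch Logs ingests as metric data. A minimal sketch of that emission path (namespace, service, and metric name are illustrative; the commented output is abbreviated):

```python
from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import MetricUnit

# Assumes POWERTOOLS_METRICS_DISABLED is unset and POWERTOOLS_DEV is not truthy
metrics = Metrics(namespace="ServerlessAirline", service="payment")
metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)

# Prints a single compact JSON line with the EMF envelope, roughly:
# {"_aws": {"Timestamp": ..., "CloudWatchMetrics": [{"Namespace": "ServerlessAirline", ...}]},
#  "service": "payment", "SuccessfulBooking": [1.0]}
metrics.flush_metrics()
```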
diff --git a/aws_lambda_powertools/metrics/provider/datadog/datadog.py b/aws_lambda_powertools/metrics/provider/datadog/datadog.py
index 0442af3f86c..3e88523df38 100644
--- a/aws_lambda_powertools/metrics/provider/datadog/datadog.py
+++ b/aws_lambda_powertools/metrics/provider/datadog/datadog.py
@@ -10,6 +10,7 @@
from typing import TYPE_CHECKING, Any
from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError
+from aws_lambda_powertools.metrics.functions import is_metrics_disabled
from aws_lambda_powertools.metrics.provider import BaseProvider
from aws_lambda_powertools.metrics.provider.datadog.warnings import DatadogDataValidationWarning
from aws_lambda_powertools.shared import constants
@@ -99,7 +100,6 @@ def add_metric(
>>> sales='sam'
>>> )
"""
-
# validating metric name
if not self._validate_datadog_metric_name(name):
docs = "https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics"
@@ -180,6 +180,7 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
raise_on_empty_metrics : bool, optional
raise exception if no metrics are emitted, by default False
"""
+
if not raise_on_empty_metrics and len(self.metric_set) == 0:
warnings.warn(
"No application metrics to publish. The cold-start metric may be published if enabled. "
@@ -200,7 +201,7 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
timestamp=metric_item["e"],
tags=metric_item["t"],
)
- else:
+ elif not is_metrics_disabled():
# dd module not found: flush to log, this format can be recognized via datadog log forwarder
# https://github.com/Datadog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77
for metric_item in metrics:
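
When the `datadog` module is unavailable and metrics are not disabled, each metric is flushed to the log as a single JSON line that the Datadog Log Forwarder recognizes. A rough sketch of that line's shape (field meanings inferred from the loop above and the tests below; concrete values and the exact serialization call are illustrative assumptions):

```python
import json
import time

# One flushed metric line: "m" = namespace-prefixed metric name,
# "v" = value, "e" = epoch timestamp, "t" = list of tags
metric_line = {
    "m": "ServerlessAirline.successful_booking",
    "v": 1,
    "e": int(time.time()),
    "t": ["service:booking"],
}
print(json.dumps(metric_line, default=str))
```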
diff --git a/aws_lambda_powertools/shared/constants.py b/aws_lambda_powertools/shared/constants.py
index 9652e09a0b2..199f37d99bb 100644
--- a/aws_lambda_powertools/shared/constants.py
+++ b/aws_lambda_powertools/shared/constants.py
@@ -40,6 +40,7 @@
METRICS_NAMESPACE_ENV: str = "POWERTOOLS_METRICS_NAMESPACE"
DATADOG_FLUSH_TO_LOG: str = "DD_FLUSH_TO_LOG"
SERVICE_NAME_ENV: str = "POWERTOOLS_SERVICE_NAME"
+METRICS_DISABLED_ENV: str = "POWERTOOLS_METRICS_DISABLED"
# If the timestamp of log event is more than 2 hours in future, the log event is skipped.
# If the timestamp of log event is more than 14 days in past, the log event is skipped.
# See https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html
diff --git a/docs/core/metrics.md b/docs/core/metrics.md
index 6fdcf1fa043..88f0292231d 100644
--- a/docs/core/metrics.md
+++ b/docs/core/metrics.md
@@ -34,12 +34,16 @@ If you're new to Amazon CloudWatch, there are five terminologies you must be awa
???+ tip
All examples shared in this documentation are available within the [project repository](https://github.com/aws-powertools/powertools-lambda-python/tree/develop/examples){target="_blank"}.
-Metric has two global settings that will be used across all metrics emitted:
+Metric has three global settings that will be used across all metrics emitted:
-| Setting | Description | Environment variable | Constructor parameter |
-| -------------------- | ------------------------------------------------------------------------------- | ------------------------------ | --------------------- |
-| **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` |
-| **Service** | Optionally, sets **service** metric dimension across all metrics e.g. `payment` | `POWERTOOLS_SERVICE_NAME` | `service` |
+| Setting | Description | Environment variable | Constructor parameter |
+| ------------------------------- | ------------------------------------------------------------------------------- | ------------------------------ | --------------------- |
+| **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` |
+| **Service** | Optionally, sets **service** metric dimension across all metrics e.g. `payment` | `POWERTOOLS_SERVICE_NAME` | `service` |
+| **Disable Powertools Metrics**  | Optionally, disables all Powertools metrics                                      | `POWERTOOLS_METRICS_DISABLED`  | N/A                   |
+
+???+ info
+ `POWERTOOLS_METRICS_DISABLED` will not disable default metrics created by AWS services.
???+ tip
Use your application or main service as the metric namespace to easily group all metrics.
@@ -79,7 +83,7 @@ You can create metrics using `add_metric`, and you can create dimensions for all
CloudWatch EMF supports a max of 100 metrics per batch. Metrics utility will flush all metrics when adding the 100th metric. Subsequent metrics (101st+) will be aggregated into a new EMF object, for your convenience.
???+ warning "Warning: Do not create metrics or dimensions outside the handler"
- Metrics or dimensions added in the global scope will only be added during cold start. Disregard if you that's the intended behavior.
+ Metrics or dimensions added in the global scope will only be added during cold start. Disregard if that's the intended behavior.
### Adding high-resolution metrics
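
Because the constructor parameter is N/A, disabling is driven purely by the environment. A hedged end-to-end sketch (handler, namespace, and metric names are made up; in a real deployment you would set the variable in the function configuration rather than in code):

```python
import os

# Illustrative only: normally set in the function's environment configuration
os.environ["POWERTOOLS_METRICS_DISABLED"] = "true"

from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import MetricUnit

metrics = Metrics(namespace="ServerlessAirline", service="booking")


@metrics.log_metrics
def lambda_handler(event, context):
    # Metrics are aggregated as usual, but the final flush emits nothing
    metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
    return {"statusCode": 200}


lambda_handler({}, None)  # no EMF JSON line is printed
```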
diff --git a/docs/core/metrics/datadog.md b/docs/core/metrics/datadog.md
index 3cf38e1c425..c5b9fdc35b8 100644
--- a/docs/core/metrics/datadog.md
+++ b/docs/core/metrics/datadog.md
@@ -23,7 +23,7 @@ stateDiagram-v2
DatadogExtension --> Datadog: async
state LambdaExtension {
- DatadogExtension
+ DatadogExtension
}
```
@@ -174,10 +174,14 @@ This has the advantage of keeping cold start metric separate from your applicati
You can use any of the following environment variables to configure `DatadogMetrics`:
-| Setting | Description | Environment variable | Constructor parameter |
-| -------------------- | -------------------------------------------------------------------------------- | ------------------------------ | --------------------- |
-| **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` |
-| **Flush to log** | Use this when you want to flush metrics to be exported through Datadog Forwarder | `DD_FLUSH_TO_LOG` | `flush_to_log` |
+| Setting | Description | Environment variable | Constructor parameter |
+| ------------------------------ | -------------------------------------------------------------------------------- | ------------------------------ | --------------------- |
+| **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` |
+| **Flush to log** | Use this when you want to flush metrics to be exported through Datadog Forwarder | `DD_FLUSH_TO_LOG` | `flush_to_log` |
+| **Disable Powertools Metrics** | Optionally, disables all Powertools metrics                                        | `POWERTOOLS_METRICS_DISABLED`  | N/A                   |
+
+???+ info
+ `POWERTOOLS_METRICS_DISABLED` will not disable default metrics created by AWS services.
## Advanced
diff --git a/docs/index.md b/docs/index.md
index f2155db96af..4f5c165f287 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -432,6 +432,7 @@ When `POWERTOOLS_DEV` is set to a truthy value (`1`, `true`), it'll have the fol
| __Logger__ | Increase JSON indentation to 4. This will ease local debugging when running functions locally under emulators or direct calls while not affecting unit tests. However, Amazon CloudWatch Logs view will degrade as each new line is treated as a new message. |
| __Event Handler__ | Enable full traceback errors in the response, indent request/responses, and CORS in dev mode (`*`). |
| __Tracer__ | Future-proof safety to disable tracing operations in non-Lambda environments. This already happens automatically in the Tracer utility. |
+| __Metrics__ | Disables Powertools metrics emission by default. However, this can be overridden by explicitly setting `POWERTOOLS_METRICS_DISABLED=false`, which takes precedence over the dev mode setting. |
## Debug mode
diff --git a/tests/functional/metrics/datadog/test_metrics_datadog.py b/tests/functional/metrics/datadog/test_metrics_datadog.py
index 2626b8755c6..631518287a0 100644
--- a/tests/functional/metrics/datadog/test_metrics_datadog.py
+++ b/tests/functional/metrics/datadog/test_metrics_datadog.py
@@ -334,3 +334,163 @@ def test_namespace_env_var(monkeypatch):
# THEN namespace should match the explicitly passed variable and not the env var
assert output[0]["m"] == f"{env_namespace}.item_sold"
+
+
+def test_metrics_disabled_with_env_var(monkeypatch, capsys):
+ # GIVEN environment variable is set to disable metrics
+ monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "true")
+
+ # WHEN metrics is initialized and adding metrics
+ metrics = DatadogMetrics()
+ metrics.add_metric(name="test_metric", value=1)
+ metrics.flush_metrics()
+
+ # THEN no metrics should have been recorded
+ captured = capsys.readouterr()
+ assert not captured.out
+
+
+def test_metrics_disabled_persists_after_flush(monkeypatch, capsys):
+ # GIVEN environment variable is set to disable metrics
+ monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "true")
+ metrics = DatadogMetrics()
+
+ # WHEN multiple operations are performed with flush in between
+ metrics.add_metric(name="metric1", value=1)
+ metrics.flush_metrics()
+
+ # THEN first flush should not emit any metrics
+ captured = capsys.readouterr()
+ assert not captured.out
+
+ # WHEN adding and flushing more metrics
+ metrics.add_metric(name="metric2", value=2)
+ metrics.flush_metrics()
+
+ # THEN second flush should also not emit any metrics
+ captured = capsys.readouterr()
+ assert not captured.out
+
+
+def test_metrics_disabled_with_namespace(monkeypatch, capsys):
+ # GIVEN environment variable is set to disable metrics
+ monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "true")
+
+    # WHEN metrics is initialized with a namespace
+ metrics = DatadogMetrics(namespace="test_namespace")
+ metrics.add_metric(name="test_metric", value=1)
+ metrics.flush_metrics()
+
+ # THEN no metrics should have been recorded
+ captured = capsys.readouterr()
+ assert not captured.out
+
+
+def test_metrics_disabled_with_dev_mode_true(monkeypatch, capsys):
+ # GIVEN dev mode is enabled
+ monkeypatch.setenv("POWERTOOLS_DEV", "true")
+
+ # WHEN metrics is initialized
+ metrics = DatadogMetrics(namespace="test")
+ metrics.add_metric(name="test_metric", value=1)
+ metrics.flush_metrics()
+
+ # THEN no metrics should have been recorded
+ captured = capsys.readouterr()
+ assert not captured.out
+
+
+def test_metrics_enabled_with_env_var_false(monkeypatch, capsys):
+ # GIVEN environment variable is set to enable metrics
+ monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "false")
+
+ # WHEN metrics is initialized with namespace and metrics added
+ metrics = DatadogMetrics(namespace="test")
+ metrics.add_metric(name="test_metric", value=1)
+ metrics.flush_metrics()
+
+ # THEN Datadog metrics should be written to stdout
+ output = capsys.readouterr().out
+ metrics_output = json.loads(output)
+
+ assert metrics_output
+
+
+def test_metrics_enabled_with_env_var_not_set(monkeypatch, capsys):
+ # GIVEN environment variable is not set
+ monkeypatch.delenv("POWERTOOLS_METRICS_DISABLED", raising=False)
+
+ # WHEN metrics is initialized with namespace and metrics added
+ metrics = DatadogMetrics(namespace="test")
+ metrics.add_metric(name="test_metric", value=1)
+ metrics.flush_metrics()
+
+ # THEN metrics should be written to stdout
+ output = capsys.readouterr().out
+ metrics_output = json.loads(output)
+
+ assert "test.test_metric" in metrics_output["m"]
+
+
+def test_metrics_enabled_with_dev_mode_false(monkeypatch, capsys):
+ # GIVEN dev mode is disabled
+ monkeypatch.setenv("POWERTOOLS_DEV", "false")
+
+ # WHEN metrics is initialized
+ metrics = DatadogMetrics(namespace="test")
+ metrics.add_metric(name="test_metric", value=1)
+ metrics.flush_metrics()
+
+ # THEN metrics should be written to stdout
+ output = capsys.readouterr().out
+ metrics_output = json.loads(output)
+ assert metrics_output
+
+
+def test_metrics_dev_mode_does_not_override_metrics_disabled(monkeypatch, capsys):
+ # GIVEN dev mode is enabled but metrics disabled is false
+ monkeypatch.setenv("POWERTOOLS_DEV", "true")
+ monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "false")
+
+ # WHEN metrics is initialized
+ metrics = DatadogMetrics(namespace="test")
+ metrics.add_metric(name="test_metric", value=1)
+ metrics.flush_metrics()
+
+ # THEN metrics should be written to stdout since POWERTOOLS_METRICS_DISABLED is false
+ output = capsys.readouterr().out
+ assert output # First verify we have output
+ metrics_output = json.loads(output)
+ assert metrics_output # Then verify it's valid JSON
+ assert "test.test_metric" in metrics_output["m"] # Verify the metric is present
+
+
+def test_metrics_enabled_with_both_false(monkeypatch, capsys):
+ # GIVEN both dev mode and metrics disabled are false
+ monkeypatch.setenv("POWERTOOLS_DEV", "false")
+ monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "false")
+
+ # WHEN metrics is initialized
+ metrics = DatadogMetrics(namespace="test")
+ metrics.add_metric(name="test_metric", value=1)
+ metrics.flush_metrics()
+
+ # THEN metrics should be written to stdout
+ output = capsys.readouterr().out
+ metrics_output = json.loads(output)
+ assert metrics_output
+
+
+def test_metrics_disabled_with_dev_mode_false_and_metrics_disabled_true(monkeypatch, capsys):
+ # GIVEN dev mode is false but metrics disabled is true
+ monkeypatch.setenv("POWERTOOLS_DEV", "false")
+ monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "true")
+
+ # WHEN metrics is initialized
+ metrics = DatadogMetrics(namespace="test")
+ metrics.add_metric(name="test_metric", value=1)
+ metrics.flush_metrics()
+
+ # THEN no metrics should have been recorded
+ captured = capsys.readouterr()
+ assert not captured.out
diff --git a/tests/functional/metrics/required_dependencies/test_metrics_cloudwatch_emf.py b/tests/functional/metrics/required_dependencies/test_metrics_cloudwatch_emf.py
index 5633d573a54..29418c42bcf 100644
--- a/tests/functional/metrics/required_dependencies/test_metrics_cloudwatch_emf.py
+++ b/tests/functional/metrics/required_dependencies/test_metrics_cloudwatch_emf.py
@@ -1329,3 +1329,160 @@ def lambda_handler(evt, ctx):
"This metric doesn't meet the requirements and will be skipped by Amazon CloudWatch. "
"Ensure the timestamp is within 14 days past or 2 hours future."
)
+
+
+def test_metrics_disabled_with_env_var(monkeypatch, namespace, capsys):
+ # GIVEN environment variable is set to disable metrics
+ monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "true")
+
+ # WHEN metrics is initialized and adding metrics
+ metrics = Metrics(namespace=namespace)
+ metrics.add_metric(name="test_metric", unit="Count", value=1)
+ metrics.flush_metrics()
+
+ # THEN no Powertools metrics should be sent to CloudWatch
+ output = capsys.readouterr()
+ assert not output.out
+
+
+def test_metrics_disabled_persists_after_flush(monkeypatch, capsys, namespace):
+ # GIVEN environment variable is set to disable metrics
+ monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "true")
+ metrics = Metrics(namespace=namespace)
+
+ # WHEN multiple operations are performed with flush in between
+ metrics.add_metric(name="metric1", unit="Count", value=1)
+ metrics.flush_metrics()
+
+ # THEN first flush should not emit any metrics
+ captured = capsys.readouterr()
+ assert not captured.out
+
+ # WHEN adding and flushing more metrics
+ metrics.add_metric(name="metric2", unit="Count", value=2)
+ metrics.flush_metrics()
+
+ # THEN second flush should also not emit any metrics
+ captured = capsys.readouterr()
+ assert not captured.out
+
+
+def test_metrics_disabled_with_namespace_and_service(monkeypatch, capsys):
+ # GIVEN environment variable is set to disable metrics
+ monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "true")
+
+ # WHEN metrics is initialized with namespace and service
+ metrics = Metrics(namespace="test_namespace", service="test_service")
+ metrics.add_metric(name="test_metric", unit="Count", value=1)
+ metrics.flush_metrics()
+
+ # THEN no metrics should have been recorded
+ captured = capsys.readouterr()
+ assert not captured.out
+
+
+def test_metrics_enabled_with_env_var_false(monkeypatch, capsys):
+ # GIVEN environment variable is set to enable metrics
+ monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "false")
+
+ # WHEN metrics is initialized with namespace and metrics added
+ metrics = Metrics(namespace="test")
+ metrics.add_metric(name="test_metric", unit="Count", value=1)
+ metrics.flush_metrics()
+
+ # THEN metrics should be written to stdout
+ output = capsys.readouterr().out
+ metrics_output = json.loads(output)
+
+ assert "test_metric" in metrics_output
+ assert metrics_output["test_metric"] == [1.0]
+ assert metrics_output["_aws"]["CloudWatchMetrics"][0]["Namespace"] == "test"
+ assert metrics_output["_aws"]["CloudWatchMetrics"][0]["Metrics"][0]["Name"] == "test_metric"
+
+
+def test_metrics_enabled_with_env_var_not_set(monkeypatch, capsys):
+ # GIVEN environment variable is not set
+ monkeypatch.delenv("POWERTOOLS_METRICS_DISABLED", raising=False)
+
+ # WHEN metrics is initialized with namespace and metrics added
+ metrics = Metrics(namespace="test")
+ metrics.add_metric(name="test_metric", unit="Count", value=1)
+ metrics.flush_metrics()
+
+ # THEN metrics should be written to stdout
+ output = capsys.readouterr().out
+ metrics_output = json.loads(output)
+
+ assert "test_metric" in metrics_output
+ assert metrics_output["test_metric"] == [1.0]
+ assert metrics_output["_aws"]["CloudWatchMetrics"][0]["Namespace"] == "test"
+ assert metrics_output["_aws"]["CloudWatchMetrics"][0]["Metrics"][0]["Name"] == "test_metric"
+
+
+def test_metrics_disabled_with_dev_mode(monkeypatch, namespace, capsys):
+    # GIVEN dev mode is enabled
+ monkeypatch.setenv("POWERTOOLS_DEV", "true")
+
+ # WHEN metrics is initialized and adding metrics
+ metrics = Metrics(namespace=namespace)
+ metrics.add_metric(name="test_metric", unit="Count", value=1)
+
+ # AND flushing metrics
+ metrics.flush_metrics()
+
+ # THEN no metrics should have been recorded
+ captured = capsys.readouterr()
+ assert not captured.out
+
+
+def test_metrics_enabled_with_dev_mode_false(monkeypatch, capsys):
+    # GIVEN dev mode is disabled
+ monkeypatch.setenv("POWERTOOLS_DEV", "false")
+
+ # WHEN metrics is initialized with namespace and metrics added
+ metrics = Metrics(namespace="test")
+ metrics.add_metric(name="test_metric", unit="Count", value=1)
+ metrics.flush_metrics()
+
+ # THEN metrics should be written to stdout
+ output = capsys.readouterr().out
+ metrics_output = json.loads(output)
+
+ assert "test_metric" in metrics_output
+ assert metrics_output["test_metric"] == [1.0]
+ assert metrics_output["_aws"]["CloudWatchMetrics"][0]["Namespace"] == "test"
+ assert metrics_output["_aws"]["CloudWatchMetrics"][0]["Metrics"][0]["Name"] == "test_metric"
+
+
+def test_metrics_dev_mode_does_not_override_metrics_disabled(monkeypatch, capsys):
+ # GIVEN dev mode is enabled but metrics disabled is explicitly false
+ monkeypatch.setenv("POWERTOOLS_DEV", "true")
+ monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "false")
+
+ # WHEN metrics is initialized
+ metrics = Metrics(namespace="test")
+ metrics.add_metric(name="test_metric", value=1, unit="Count")
+ metrics.flush_metrics()
+
+ # THEN metrics should be written to stdout since POWERTOOLS_METRICS_DISABLED is false
+ output = capsys.readouterr().out
+ assert output # First verify we have output
+ metrics_output = json.loads(output)
+ assert metrics_output
+ assert "_aws" in metrics_output
+ assert any(metric["Name"] == "test_metric" for metric in metrics_output["_aws"]["CloudWatchMetrics"][0]["Metrics"])
+
+
+def test_metrics_disabled_with_dev_mode_false_and_metrics_disabled_true(monkeypatch, capsys):
+ # GIVEN dev mode is false but metrics disabled is true
+ monkeypatch.setenv("POWERTOOLS_DEV", "false")
+ monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "true")
+
+ # WHEN metrics is initialized
+ metrics = Metrics(namespace="test")
+ metrics.add_metric(name="test_metric", value=1, unit="Count")
+ metrics.flush_metrics()
+
+ # THEN no metrics should have been recorded
+ captured = capsys.readouterr()
+ assert not captured.out