
Commit 073254c

feat: allow metric validation to be disabled
1 parent b4d0baa commit 073254c

5 files changed: +136 additions, -12 deletions

aws_lambda_powertools/metrics/base.py

Lines changed: 13 additions & 3 deletions
@@ -138,7 +138,9 @@ def add_metric(self, name: str, unit: Union[MetricUnit, str], value: float):
             # since we could have more than 100 metrics
             self.metric_set.clear()
 
-    def serialize_metric_set(self, metrics: Dict = None, dimensions: Dict = None, metadata: Dict = None) -> Dict:
+    def serialize_metric_set(
+        self, metrics: Dict = None, dimensions: Dict = None, metadata: Dict = None, validate_metrics: bool = True
+    ) -> Dict:
         """Serializes metric and dimensions set
 
         Parameters
@@ -149,6 +151,8 @@ def serialize_metric_set(self, metrics: Dict = None, dimensions: Dict = None, me
             Dictionary of dimensions to serialize, by default None
         metadata: Dict, optional
             Dictionary of metadata to serialize, by default None
+        validate_metrics: bool, optional
+            Whether to validate metrics against schema
 
         Example
         -------
@@ -209,13 +213,19 @@ def serialize_metric_set(self, metrics: Dict = None, dimensions: Dict = None, me
             **metric_names_and_values,  # "single_metric": 1.0
         }
 
+        if validate_metrics:
+            self._validate_metrics(metrics=embedded_metrics_object)
+
+        return embedded_metrics_object
+
+    @staticmethod
+    def _validate_metrics(metrics: Dict, schema: Dict = CLOUDWATCH_EMF_SCHEMA):
         try:
             logger.debug("Validating serialized metrics against CloudWatch EMF schema")
-            fastjsonschema.validate(definition=CLOUDWATCH_EMF_SCHEMA, data=embedded_metrics_object)
+            fastjsonschema.validate(definition=schema, data=metrics)
         except fastjsonschema.JsonSchemaException as e:
             message = f"Invalid format. Error: {e.message}, Invalid item: {e.name}"  # noqa: B306, E501
             raise SchemaValidationError(message)
-        return embedded_metrics_object
 
     def add_dimension(self, name: str, value: str):
         """Adds given dimension to all metrics

aws_lambda_powertools/metrics/metric.py

Lines changed: 10 additions & 4 deletions
@@ -61,7 +61,7 @@ def add_metric(self, name: str, unit: MetricUnit, value: float):
 
 
 @contextmanager
-def single_metric(name: str, unit: MetricUnit, value: float, namespace: str = None):
+def single_metric(name: str, unit: MetricUnit, value: float, namespace: str = None, validate_metrics: bool = True):
     """Context manager to simplify creation of a single metric
 
     Example
@@ -94,6 +94,8 @@ def single_metric(name: str, unit: MetricUnit, value: float, namespace: str = No
         Metric value
     namespace: str
         Namespace for metrics
+    validate_metrics: bool, optional
+        Whether to validate metrics against schema, by default True
 
     Yields
     -------
@@ -102,14 +104,18 @@ def single_metric(name: str, unit: MetricUnit, value: float, namespace: str = No
 
     Raises
     ------
-    e
-        Propagate error received
+    MetricUnitError
+        When metric unit isn't supported by CloudWatch
+    MetricValueError
+        When metric value isn't a number
+    SchemaValidationError
+        When metric object fails EMF schema validation
     """
     metric_set = None
     try:
         metric: SingleMetric = SingleMetric(namespace=namespace)
         metric.add_metric(name=name, unit=unit, value=value)
         yield metric
-        metric_set: Dict = metric.serialize_metric_set()
+        metric_set: Dict = metric.serialize_metric_set(validate_metrics=validate_metrics)
     finally:
         print(json.dumps(metric_set))
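A usage sketch of the new parameter on the context manager (metric name, dimension, and namespace are made up for illustration):

    from aws_lambda_powertools.metrics import MetricUnit, single_metric

    # On exit the metric set is serialized and printed as EMF JSON;
    # validate_metrics=False skips the schema validation step.
    with single_metric(
        name="ColdStart", unit=MetricUnit.Count, value=1, namespace="ExampleApp", validate_metrics=False
    ) as metric:
        metric.add_dimension(name="function_name", value="example")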

aws_lambda_powertools/metrics/metrics.py

Lines changed: 14 additions & 5 deletions
@@ -57,25 +57,34 @@ def do_something():
 
     Parameters
     ----------
-    MetricManager : MetricManager
-        Inherits from `aws_lambda_powertools.metrics.base.MetricManager`
+    service : str, optional
+        service name to be used as metric dimension, by default "service_undefined"
+    namespace : str
+        Namespace for metrics
+    validate_metrics: bool, optional
+        Whether to validate metrics against schema, by default True
 
     Raises
     ------
-    e
-        Propagate error received
+    MetricUnitError
+        When metric unit isn't supported by CloudWatch
+    MetricValueError
+        When metric value isn't a number
+    SchemaValidationError
+        When metric object fails EMF schema validation
     """
 
     _metrics = {}
     _dimensions = {}
     _metadata = {}
 
-    def __init__(self, service: str = None, namespace: str = None):
+    def __init__(self, service: str = None, namespace: str = None, validate_metrics: bool = True):
         self.metric_set = self._metrics
         self.dimension_set = self._dimensions
         self.service = service
         self.namespace = namespace
         self.metadata_set = self._metadata
+        self.validate_metrics = validate_metrics
 
         super().__init__(
             metric_set=self.metric_set,
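The constructor-level flag suggests an instance-wide default. A sketch, with the caveat that this hunk doesn't show the stored self.validate_metrics being passed through to serialization, so that wiring is an assumption:

    from aws_lambda_powertools import Metrics

    # Illustrative names; only validate_metrics comes from this commit.
    metrics = Metrics(namespace="ExampleApp", service="orders", validate_metrics=False)
    metrics.add_metric(name="OrdersProcessed", unit="Count", value=5)

    # Passing the flag explicitly is the path the diff demonstrably supports.
    payload = metrics.serialize_metric_set(validate_metrics=False)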

tests/performance/conftest.py

Lines changed: 18 additions & 0 deletions
import time
from contextlib import contextmanager
from typing import Generator


@contextmanager
def timing() -> Generator:
    """Generator to quickly time operations. It can add ~5ms of overhead, so take that into account in the elapsed time

    Examples
    --------

        with timing() as t:
            print("something")
        elapsed = t()
    """
    start = time.perf_counter()
    yield lambda: time.perf_counter() - start  # gen as lambda to calculate elapsed time
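Yielding a lambda rather than a value lets the caller sample elapsed time at any point, including after the with block exits. A quick sketch of the pattern (the workload is a stand-in):

    with timing() as t:
        sum(range(1_000_000))  # stand-in for the operation under test
    elapsed = t()  # the callable still works after the block; returns seconds as a float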

tests/performance/test_metrics.py

Lines changed: 81 additions & 0 deletions
import time
from contextlib import contextmanager
from typing import Dict, Generator

import pytest

from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import MetricUnit
from aws_lambda_powertools.metrics import metrics as metrics_global

METRICS_VALIDATION_SLA: float = 0.01
METRICS_SERIALIZATION_SLA: float = 0.01


@contextmanager
def timing() -> Generator:
    """Generator to quickly time operations. It can add ~5ms of overhead, so take that into account in the elapsed time

    Examples
    --------

        with timing() as t:
            print("something")
        elapsed = t()
    """
    start = time.perf_counter()
    yield lambda: time.perf_counter() - start  # gen as lambda to calculate elapsed time


@pytest.fixture(scope="function", autouse=True)
def reset_metric_set():
    metrics = Metrics()
    metrics.clear_metrics()
    metrics_global.is_cold_start = True  # ensure each test has cold start
    yield


@pytest.fixture
def namespace() -> str:
    return "test_namespace"


@pytest.fixture
def metric() -> Dict[str, str]:
    return {"name": "single_metric", "unit": MetricUnit.Count, "value": 1}


def time_large_metric_set_operation(metrics_instance: Metrics, validate_metrics: bool = True) -> float:
    metrics_instance.add_dimension(name="test_dimension", value="test")

    for i in range(99):
        metrics_instance.add_metric(name=f"metric_{i}", unit="Count", value=1)

    with timing() as t:
        metrics_instance.serialize_metric_set(validate_metrics=validate_metrics)

    return t()


@pytest.mark.perf
def test_metrics_validation_sla(namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics(namespace=namespace)
    # WHEN we add and serialize 99 metrics
    elapsed = time_large_metric_set_operation(metrics_instance=my_metrics)

    # THEN completion time should be below our validation SLA
    if elapsed > METRICS_VALIDATION_SLA:
        pytest.fail(f"Metric validation should be below {METRICS_VALIDATION_SLA}s: {elapsed}")


@pytest.mark.perf
def test_metrics_serialization_sla(namespace):
    # GIVEN Metrics is initialized with validation disabled
    my_metrics = Metrics(namespace=namespace, validate_metrics=False)
    # WHEN we add and serialize 99 metrics
    elapsed = time_large_metric_set_operation(metrics_instance=my_metrics, validate_metrics=False)

    # THEN completion time should be below our serialization SLA
    if elapsed > METRICS_SERIALIZATION_SLA:
        pytest.fail(f"Metric serialization should be below {METRICS_SERIALIZATION_SLA}s: {elapsed}")
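Note: both tests carry a custom perf marker, so they can be selected with pytest -m perf and excluded from the default run; pytest warns about unknown markers unless perf is registered in the project's pytest configuration, which this diff doesn't show.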
