diff --git a/aws_lambda_powertools/logging/formatter.py b/aws_lambda_powertools/logging/formatter.py
index 8aa07069f97..7b38524b6d1 100644
--- a/aws_lambda_powertools/logging/formatter.py
+++ b/aws_lambda_powertools/logging/formatter.py
@@ -85,7 +85,7 @@ def format(self, record):  # noqa: A003
             # Cache the traceback text to avoid converting it multiple times
             # (it's constant anyway)
             # from logging.Formatter:format
-            if not record.exc_text:
+            if not record.exc_text:  # pragma: no cover
                 record.exc_text = self.formatException(record.exc_info)
 
         if record.exc_text:
diff --git a/aws_lambda_powertools/tracing/tracer.py b/aws_lambda_powertools/tracing/tracer.py
index d31cbd61ebd..b97816f25b5 100644
--- a/aws_lambda_powertools/tracing/tracer.py
+++ b/aws_lambda_powertools/tracing/tracer.py
@@ -269,8 +269,10 @@ def decorate(event, context):
                         function_name=lambda_handler_name, data=response, subsegment=subsegment
                     )
                 except Exception as err:
-                    logger.exception("Exception received from lambda handler")
-                    self._add_full_exception_as_metadata(function_name=self.service, error=err, subsegment=subsegment)
+                    logger.exception(f"Exception received from {lambda_handler_name}")
+                    self._add_full_exception_as_metadata(
+                        function_name=lambda_handler_name, error=err, subsegment=subsegment
+                    )
                     raise
 
             return response
diff --git a/pyproject.toml b/pyproject.toml
index 59d5b2bbbc0..c147ab1c9a7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -58,6 +58,7 @@ directory = "test_report"
 title = "Lambda Powertools Test Coverage"
 
 [tool.coverage.report]
+fail_under = 90
 exclude_lines = [
     # Have to re-enable the standard pragma
     "pragma: no cover",
diff --git a/tests/functional/test_logger.py b/tests/functional/test_logger.py
index 6b6a4bb6dde..211f12f8fc1 100644
--- a/tests/functional/test_logger.py
+++ b/tests/functional/test_logger.py
@@ -7,7 +7,7 @@
 
 from aws_lambda_powertools import Logger, Tracer
 from aws_lambda_powertools.logging.exceptions import InvalidLoggerSamplingRateError
-from aws_lambda_powertools.logging.logger import JsonFormatter, set_package_logger
+from aws_lambda_powertools.logging.logger import set_package_logger
 
 
 @pytest.fixture
@@ -39,227 +39,220 @@ def lambda_context():
     return namedtuple("LambdaContext", lambda_context.keys())(*lambda_context.values())
 
 
+@pytest.fixture
+def lambda_event():
+    return {"greeting": "hello"}
+
+
+def capture_logging_output(stdout):
+    return json.loads(stdout.getvalue())
+
+
+def capture_multiple_logging_statements_output(stdout):
+    return [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
+
+
 def test_setup_service_name(root_logger, stdout):
-    # GIVEN service is explicitly defined
-    # WHEN logger is setup
-    # THEN service field should be equals service given
     service_name = "payment"
+    # GIVEN Logger is initialized
+    # WHEN service is explicitly defined
     logger = Logger(service=service_name, stream=stdout)
     logger.info("Hello")
 
-    log = json.loads(stdout.getvalue())
+    # THEN service field should equal the service given
+    log = capture_logging_output(stdout)
     assert service_name == log["service"]
 
 
 def test_setup_no_service_name(stdout):
-    # GIVEN no service is explicitly defined
-    # WHEN logger is setup
-    # THEN service field should be "service_undefined"
+    # GIVEN Logger is initialized
+    # WHEN no service is explicitly defined
     logger = Logger(stream=stdout)
+
     logger.info("Hello")
 
-    log = json.loads(stdout.getvalue())
+    # THEN service field should be "service_undefined"
+    log = capture_logging_output(stdout)
     assert "service_undefined" == log["service"]
 
 
 def test_setup_service_env_var(monkeypatch, stdout):
-    # GIVEN service is explicitly defined via POWERTOOLS_SERVICE_NAME env
-    # WHEN logger is setup
-    # THEN service field should be equals POWERTOOLS_SERVICE_NAME value
     service_name = "payment"
+    # GIVEN Logger is initialized
+    # WHEN service is explicitly defined via POWERTOOLS_SERVICE_NAME env
     monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", service_name)
-
     logger = Logger(stream=stdout)
+
     logger.info("Hello")
 
-    log = json.loads(stdout.getvalue())
+    # THEN service field should equal POWERTOOLS_SERVICE_NAME value
+    log = capture_logging_output(stdout)
     assert service_name == log["service"]
 
 
-def test_setup_sampling_rate(monkeypatch, stdout):
-    # GIVEN samping rate is explicitly defined via POWERTOOLS_LOGGER_SAMPLE_RATE env
-    # WHEN logger is setup
-    # THEN sampling rate should be equals POWERTOOLS_LOGGER_SAMPLE_RATE value and should sample debug logs
-
+def test_setup_sampling_rate_env_var(monkeypatch, stdout):
+    # GIVEN Logger is initialized
+    # WHEN sampling rate is explicitly set to 100% via POWERTOOLS_LOGGER_SAMPLE_RATE env
     sampling_rate = "1"
     monkeypatch.setenv("POWERTOOLS_LOGGER_SAMPLE_RATE", sampling_rate)
-    monkeypatch.setenv("LOG_LEVEL", "INFO")
-
-    logger = Logger(stream=stdout)
+    logger = Logger(stream=stdout, level="INFO")
     logger.debug("I am being sampled")
 
-    log = json.loads(stdout.getvalue())
+    # THEN sampling rate should equal POWERTOOLS_LOGGER_SAMPLE_RATE value
+    # log level should be DEBUG
+    # and debug log statements should be in stdout
+    log = capture_logging_output(stdout)
     assert sampling_rate == log["sampling_rate"]
     assert "DEBUG" == log["level"]
     assert "I am being sampled" == log["message"]
 
 
 def test_inject_lambda_context(lambda_context, stdout):
-    # GIVEN a lambda function is decorated with logger
-    # WHEN logger is setup
-    # THEN lambda contextual info should always be in the logs
-    logger_context_keys = (
-        "function_name",
-        "function_memory_size",
-        "function_arn",
-        "function_request_id",
-    )
-
+    # GIVEN Logger is initialized
     logger = Logger(stream=stdout)
 
+    # WHEN a lambda function is decorated with logger
     @logger.inject_lambda_context
     def handler(event, context):
         logger.info("Hello")
 
     handler({}, lambda_context)
 
-    log = json.loads(stdout.getvalue())
-
-    for key in logger_context_keys:
+    # THEN lambda contextual info should always be in the logs
+    log = capture_logging_output(stdout)
+    expected_logger_context_keys = (
+        "function_name",
+        "function_memory_size",
+        "function_arn",
+        "function_request_id",
+    )
+    for key in expected_logger_context_keys:
         assert key in log
 
 
-def test_inject_lambda_context_log_event_request(lambda_context, stdout):
-    # GIVEN a lambda function is decorated with logger instructed to log event
-    # WHEN logger is setup
-    # THEN logger should log event received from Lambda
-    lambda_event = {"greeting": "hello"}
-
+def test_inject_lambda_context_log_event_request(lambda_context, stdout, lambda_event):
+    # GIVEN Logger is initialized
     logger = Logger(stream=stdout)
 
+    # WHEN a lambda function is decorated with logger instructed to log event
     @logger.inject_lambda_context(log_event=True)
-    # @logger.inject_lambda_context(log_event=True)
     def handler(event, context):
         logger.info("Hello")
 
     handler(lambda_event, lambda_context)
 
-    # Given that our string buffer has many log statements separated by newline \n
-    # We need to clean it before we can assert on
-    logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
-    logged_event, _ = logs
-    assert "greeting" in logged_event["message"]
+    # THEN logger should log event received from Lambda
+    logged_event, _ = capture_multiple_logging_statements_output(stdout)
+    assert logged_event["message"] == lambda_event
 
 
-def test_inject_lambda_context_log_event_request_env_var(monkeypatch, lambda_context, stdout):
-    # GIVEN a lambda function is decorated with logger instructed to log event
-    # via POWERTOOLS_LOGGER_LOG_EVENT env
-    # WHEN logger is setup
-    # THEN logger should log event received from Lambda
-    lambda_event = {"greeting": "hello"}
+def test_inject_lambda_context_log_event_request_env_var(monkeypatch, lambda_context, stdout, lambda_event):
+    # GIVEN Logger is initialized
     monkeypatch.setenv("POWERTOOLS_LOGGER_LOG_EVENT", "true")
-
     logger = Logger(stream=stdout)
 
+    # WHEN a lambda function is decorated with logger instructed to log event
+    # via POWERTOOLS_LOGGER_LOG_EVENT env
     @logger.inject_lambda_context
     def handler(event, context):
         logger.info("Hello")
 
     handler(lambda_event, lambda_context)
 
-    # Given that our string buffer has many log statements separated by newline \n
-    # We need to clean it before we can assert on
-    logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
-
-    event = {}
-    for log in logs:
-        if "greeting" in log["message"]:
-            event = log["message"]
-
-    assert event == lambda_event
-
+    # THEN logger should log event received from Lambda
+    logged_event, _ = capture_multiple_logging_statements_output(stdout)
+    assert logged_event["message"] == lambda_event
 
-def test_inject_lambda_context_log_no_request_by_default(monkeypatch, lambda_context, stdout):
-    # GIVEN a lambda function is decorated with logger
-    # WHEN logger is setup
-    # THEN logger should not log event received by lambda handler
-    lambda_event = {"greeting": "hello"}
 
+def test_inject_lambda_context_log_no_request_by_default(monkeypatch, lambda_context, stdout, lambda_event):
+    # GIVEN Logger is initialized
     logger = Logger(stream=stdout)
 
+    # WHEN a lambda function is decorated with logger
     @logger.inject_lambda_context
     def handler(event, context):
         logger.info("Hello")
 
     handler(lambda_event, lambda_context)
 
-    # Given that our string buffer has many log statements separated by newline \n
-    # We need to clean it before we can assert on
-    logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
-
-    event = {}
-    for log in logs:
-        if "greeting" in log["message"]:
-            event = log["message"]
-
-    assert event != lambda_event
+    # THEN logger should not log event received by lambda handler
+    log = capture_logging_output(stdout)
+    assert log["message"] != lambda_event
 
 
 def test_inject_lambda_cold_start(lambda_context, stdout):
-    # GIVEN a lambda function is decorated with logger, and called twice
-    # WHEN logger is setup
-    # THEN cold_start key should only be true in the first call
-
+    # cold_start can be false as it's a global variable in Logger module
+    # so we reset it to simulate the correct behaviour
+    # since Lambda will only import our logger lib once per concurrent execution
     from aws_lambda_powertools.logging import logger
 
-    # # As we run tests in parallel global cold_start value can be false
-    # # here we reset to simulate the correct behaviour
-    # # since Lambda will only import our logger lib once per concurrent execution
     logger.is_cold_start = True
 
+    # GIVEN Logger is initialized
     logger = Logger(stream=stdout)
 
-    def custom_method():
-        logger.info("Hello from method")
-
+    # WHEN a lambda function is decorated with logger, and called twice
     @logger.inject_lambda_context
     def handler(event, context):
-        custom_method()
         logger.info("Hello")
 
     handler({}, lambda_context)
     handler({}, lambda_context)
 
-    # Given that our string buffer has many log statements separated by newline \n
-    # We need to clean it before we can assert on
-    logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
-    first_log, second_log, third_log, fourth_log = logs
-
-    # First execution
+    # THEN cold_start key should only be true in the first call
+    first_log, second_log = capture_multiple_logging_statements_output(stdout)
     assert first_log["cold_start"] is True
-    assert second_log["cold_start"] is True
-
-    # Second execution
-    assert third_log["cold_start"] is False
-    assert fourth_log["cold_start"] is False
+    assert second_log["cold_start"] is False
 
 
-def test_package_logger(capsys):
+def test_package_logger_stream(stdout):
+    # GIVEN package logger "aws_lambda_powertools" is explicitly set with a stream
+    set_package_logger(stream=stdout)
 
-    set_package_logger()
+    # WHEN Tracer is initialized in disabled mode
     Tracer(disabled=True)
-    output = capsys.readouterr()
 
-    assert "Tracing has been disabled" in output.out
+    # THEN Tracer debug log statement should be logged
+    output = stdout.getvalue()
+    logger = logging.getLogger("aws_lambda_powertools")
+    assert "Tracing has been disabled" in output
+    assert logger.level == logging.DEBUG
 
 
-def test_package_logger_format(stdout, capsys):
-    set_package_logger(stream=stdout, formatter=JsonFormatter(formatter="test"))
+def test_package_logger_format(capsys):
+    # GIVEN package logger "aws_lambda_powertools" is explicitly set
+    # with a custom formatter
+    formatter = logging.Formatter("message=%(message)s")
+    set_package_logger(formatter=formatter)
+
+    # WHEN Tracer is initialized in disabled mode
     Tracer(disabled=True)
 
-    output = json.loads(stdout.getvalue().split("\n")[0])
-    assert "test" in output["formatter"]
+    # THEN Tracer debug log statement should be logged using `message=` format
+    output = capsys.readouterr().out
+    logger = logging.getLogger("aws_lambda_powertools")
+    assert "message=" in output
+    assert logger.level == logging.DEBUG
 
 
 def test_logger_append_duplicated(stdout):
+    # GIVEN Logger is initialized with request_id field
     logger = Logger(stream=stdout, request_id="value")
+
+    # WHEN `request_id` is appended to the existing structured log
+    # using a different value
     logger.structure_logs(append=True, request_id="new_value")
     logger.info("log")
-    log = json.loads(stdout.getvalue())
+
+    # THEN subsequent log statements should have the latest value
+    log = capture_logging_output(stdout)
     assert "new_value" == log["request_id"]
 
 
 def test_logger_invalid_sampling_rate():
+    # GIVEN Logger is initialized
+    # WHEN sampling_rate is a non-numeric value
+    # THEN we should raise InvalidLoggerSamplingRateError
     with pytest.raises(InvalidLoggerSamplingRateError):
         Logger(sampling_rate="TEST")
diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py
index 244a56119cd..efc93daa739 100644
--- a/tests/functional/test_metrics.py
+++ b/tests/functional/test_metrics.py
@@ -52,17 +52,18 @@ def non_str_dimensions() -> List[Dict[str, Any]]:
 
 
 @pytest.fixture
-def namespace() -> Dict[str, str]:
+def namespace() -> str:
     return "test_namespace"
 
 
 @pytest.fixture
-def a_hundred_metrics(namespace=namespace) -> List[Dict[str, str]]:
-    metrics = []
-    for i in range(100):
-        metrics.append({"name": f"metric_{i}", "unit": "Count", "value": 1})
+def service() -> str:
+    return "test_service"
+
 
-    return metrics
+@pytest.fixture
+def a_hundred_metrics(namespace=namespace) -> List[Dict[str, str]]:
+    return [{"name": f"metric_{i}", "unit": "Count", "value": 1} for i in range(100)]
 
 
 def serialize_metrics(metrics: List[Dict], dimensions: List[Dict], namespace: str) -> Dict:
@@ -92,21 +93,23 @@ def remove_timestamp(metrics: List):
         del metric["_aws"]["Timestamp"]
 
 
-def test_single_metric_one_metric_only(capsys, metric, dimension, namespace):
-    # GIVEN we attempt to add more than one metric
+def capture_metrics_output(capsys):
+    return json.loads(capsys.readouterr().out.strip())
+
+
+def test_single_metric_logs_one_metric_only(capsys, metric, dimension, namespace):
+    # GIVEN we try adding more than one metric
     # WHEN using single_metric context manager
     with single_metric(namespace=namespace, **metric) as my_metric:
         my_metric.add_metric(name="second_metric", unit="Count", value=1)
-        my_metric.add_metric(name="third_metric", unit="Seconds", value=1)
         my_metric.add_dimension(**dimension)
 
-    output = json.loads(capsys.readouterr().out.strip())
+    output = capture_metrics_output(capsys)
     expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace)
-    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
 
     # THEN we should only have the first metric added
-    assert expected["_aws"] == output["_aws"]
+    remove_timestamp(metrics=[output, expected])
+    assert expected == output
 
 
 def test_log_metrics(capsys, metrics, dimensions, namespace):
@@ -121,65 +124,48 @@ def test_log_metrics(capsys, metrics, dimensions, namespace):
     # and flush all metrics at the end of a function execution
     @my_metrics.log_metrics
     def lambda_handler(evt, ctx):
-        return True
+        pass
 
     lambda_handler({}, {})
-
-    output = json.loads(capsys.readouterr().out.strip())
+    output = capture_metrics_output(capsys)
     expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace)
 
-    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
-
     # THEN we should have no exceptions
-    # and a valid EMF object should've been flushed correctly
-    assert expected["_aws"] == output["_aws"]
-    for dimension in dimensions:
-        assert dimension["name"] in output
+    # and a valid EMF object should be flushed correctly
+    remove_timestamp(metrics=[output, expected])
+    assert expected == output
 
 
 def test_namespace_env_var(monkeypatch, capsys, metric, dimension, namespace):
-    # GIVEN we use POWERTOOLS_METRICS_NAMESPACE
+    # GIVEN POWERTOOLS_METRICS_NAMESPACE is set
     monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace)
 
-    # WHEN creating a metric but don't explicitly
-    # add a namespace
-    with single_metric(**metric) as my_metrics:
-        my_metrics.add_dimension(**dimension)
-    monkeypatch.delenv("POWERTOOLS_METRICS_NAMESPACE")
+    # WHEN creating a metric without explicitly adding a namespace
+    with single_metric(**metric) as my_metric:
+        my_metric.add_dimension(**dimension)
 
-    output = json.loads(capsys.readouterr().out.strip())
+    output = capture_metrics_output(capsys)
     expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace)
 
-    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
-
-    # THEN we should add a namespace implicitly
-    # with the value of POWERTOOLS_METRICS_NAMESPACE env var
-    assert expected["_aws"] == output["_aws"]
+    # THEN we should add a namespace using POWERTOOLS_METRICS_NAMESPACE env var value
+    remove_timestamp(metrics=[output, expected])
+    assert expected == output
 
 
 def test_service_env_var(monkeypatch, capsys, metric, namespace):
     # GIVEN we use POWERTOOLS_SERVICE_NAME
     monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", "test_service")
-    my_metrics = Metrics(namespace=namespace)
-
-    # WHEN creating a metric but don't explicitly
-    # add a dimension
-    @my_metrics.log_metrics
-    def lambda_handler(evt, context):
-        my_metrics.add_metric(**metric)
-        return True
 
-    lambda_handler({}, {})
-
-    monkeypatch.delenv("POWERTOOLS_SERVICE_NAME")
+    # WHEN creating a metric without explicitly adding a dimension
+    with single_metric(**metric, namespace=namespace):
+        pass
 
-    output = json.loads(capsys.readouterr().out.strip())
+    output = capture_metrics_output(capsys)
     expected_dimension = {"name": "service", "value": "test_service"}
     expected = serialize_single_metric(metric=metric, dimension=expected_dimension, namespace=namespace)
 
-    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
-
-    # THEN metrics should be logged using the implicitly created "service" dimension
+    # THEN a metric should be logged using the implicitly created "service" dimension
+    remove_timestamp(metrics=[output, expected])
     assert expected == output
 
 
@@ -194,7 +180,7 @@ def test_metrics_spillover(monkeypatch, capsys, metric, dimension, namespace, a_
 
     # THEN it should serialize and flush all metrics at the 100th
     # and clear all metrics and dimensions from memory
-    output = json.loads(capsys.readouterr().out.strip())
+    output = capture_metrics_output(capsys)
     spillover_metrics = output["_aws"]["CloudWatchMetrics"][0]["Metrics"]
     assert my_metrics.metric_set == {}
     assert len(spillover_metrics) == 100
@@ -206,87 +192,75 @@ def test_metrics_spillover(monkeypatch, capsys, metric, dimension, namespace, a_
 
     # THEN serializing the 101th metric should
     # create a new EMF object with a single metric in it (101th)
-    # and contain have the same dimension we previously added
+    # and contain the same dimension we previously added
     serialized_101th_metric = my_metrics.serialize_metric_set()
     expected_101th_metric = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace)
     remove_timestamp(metrics=[serialized_101th_metric, expected_101th_metric])
+    assert serialized_101th_metric == expected_101th_metric
 
-    assert serialized_101th_metric["_aws"] == expected_101th_metric["_aws"]
-
 
-def test_log_metrics_should_invoke_function(metric, dimension, namespace):
+def test_log_metrics_decorator_call_decorated_function(metric, namespace, service):
     # GIVEN Metrics is initialized
-    my_metrics = Metrics(namespace=namespace)
+    my_metrics = Metrics(service=service, namespace=namespace)
 
     # WHEN log_metrics is used to serialize metrics
     @my_metrics.log_metrics
     def lambda_handler(evt, context):
-        my_metrics.add_metric(**metric)
-        my_metrics.add_dimension(**dimension)
         return True
 
     # THEN log_metrics should invoke the function it decorates
-    # and return no error if we have a metric, namespace, and a dimension
-    lambda_handler({}, {})
+    # and return no error if we have a namespace and dimension
+    assert lambda_handler({}, {}) is True
 
 
-def test_incorrect_metric_unit(metric, dimension, namespace):
-    # GIVEN we pass a metric unit not supported by CloudWatch
+def test_schema_validation_incorrect_metric_unit(metric, dimension, namespace):
+    # GIVEN we pass a metric unit that is not supported by CloudWatch
     metric["unit"] = "incorrect_unit"
 
-    # WHEN we attempt to add a new metric
-    # THEN it should fail validation and raise MetricUnitError
+    # WHEN we try adding a new metric
+    # THEN it should fail metric unit validation
     with pytest.raises(MetricUnitError):
         with single_metric(**metric) as my_metric:
             my_metric.add_dimension(**dimension)
 
 
-def test_schema_no_namespace(metric, dimension):
-    # GIVEN we add any metric or dimension
-    # but no namespace
-
+def test_schema_validation_no_namespace(metric, dimension):
+    # GIVEN we don't add any namespace
     # WHEN we attempt to serialize a valid EMF object
-    # THEN it should fail validation and raise SchemaValidationError
-    with pytest.raises(SchemaValidationError):
-        with single_metric(**metric) as my_metric:
-            my_metric.add_dimension(**dimension)
+    # THEN it should fail namespace validation
+    with pytest.raises(SchemaValidationError, match=".*Namespace must be string"):
+        with single_metric(**metric):
+            pass
 
 
-def test_schema_incorrect_value(metric, dimension, namespace):
-    # GIVEN we pass an incorrect metric value (non-number/float)
+def test_schema_validation_incorrect_metric_value(metric, dimension, namespace):
+    # GIVEN we pass an incorrect metric value (non-numeric)
     metric["value"] = "some_value"
 
     # WHEN we attempt to serialize a valid EMF object
     # THEN it should fail validation and raise SchemaValidationError
     with pytest.raises(MetricValueError):
-        with single_metric(**metric) as my_metric:
-            my_metric.add_dimension(**dimension)
+        with single_metric(**metric):
+            pass
 
 
-def test_schema_no_metrics(dimensions, namespace):
+def test_schema_no_metrics(service, namespace):
     # GIVEN Metrics is initialized
-    my_metrics = Metrics(namespace=namespace)
-
-    # WHEN no metrics have been added
-    # but a namespace and dimensions only
-    for dimension in dimensions:
-        my_metrics.add_dimension(**dimension)
+    my_metrics = Metrics(service=service, namespace=namespace)
 
     # THEN it should fail validation and raise SchemaValidationError
-    with pytest.raises(SchemaValidationError):
+    with pytest.raises(SchemaValidationError, match=".*Metrics must contain at least 1 items"):
         my_metrics.serialize_metric_set()
 
 
 def test_exceed_number_of_dimensions(metric, namespace):
     # GIVEN we we have more dimensions than CloudWatch supports
-    dimensions = []
-    for i in range(11):
-        dimensions.append({"name": f"test_{i}", "value": "test"})
+    dimensions = [{"name": f"test_{i}", "value": "test"} for i in range(11)]
 
     # WHEN we attempt to serialize them into a valid EMF object
     # THEN it should fail validation and raise SchemaValidationError
-    with pytest.raises(SchemaValidationError):
-        with single_metric(**metric) as my_metric:
+    with pytest.raises(SchemaValidationError, match="must contain less than or equal to 9 items"):
+        with single_metric(**metric, namespace=namespace) as my_metric:
             for dimension in dimensions:
                 my_metric.add_dimension(**dimension)
@@ -294,9 +268,8 @@ def test_exceed_number_of_dimensions(metric, namespace):
 def test_log_metrics_during_exception(capsys, metric, dimension, namespace):
     # GIVEN Metrics is initialized
     my_metrics = Metrics(namespace=namespace)
-
-    my_metrics.add_metric(**metric)
     my_metrics.add_dimension(**dimension)
+    my_metrics.add_metric(**metric)
 
     # WHEN log_metrics is used to serialize metrics
     # but an error has been raised during handler execution
@@ -307,31 +280,30 @@ def lambda_handler(evt, context):
     with pytest.raises(ValueError):
         lambda_handler({}, {})
 
-    output = json.loads(capsys.readouterr().out.strip())
+    output = capture_metrics_output(capsys)
     expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace)
 
-    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
-
-    # THEN we should log metrics and propagate the exception up
-    assert expected["_aws"] == output["_aws"]
+    # THEN we should log metrics either way
+    remove_timestamp(metrics=[output, expected])
+    assert expected == output
 
 
 def test_log_metrics_raise_on_empty_metrics(capsys, metric, dimension, namespace):
     # GIVEN Metrics is initialized
     my_metrics = Metrics(service="test_service", namespace=namespace)
 
+    # WHEN log_metrics is used with raise_on_empty_metrics param and has no metrics
     @my_metrics.log_metrics(raise_on_empty_metrics=True)
     def lambda_handler(evt, context):
-        # WHEN log_metrics is used with raise_on_empty_metrics param and has no metrics
-        return True
+        pass
 
     # THEN the raised exception should be SchemaValidationError
     # and specifically about the lack of Metrics
-    with pytest.raises(SchemaValidationError, match="_aws\.CloudWatchMetrics\[0\]\.Metrics"):  # noqa: W605
+    with pytest.raises(SchemaValidationError, match=".*Metrics must contain at least 1 items"):
         lambda_handler({}, {})
 
 
 def test_all_possible_metric_units(metric, dimension, namespace):
-    # GIVEN we add a metric for each metric unit supported by CloudWatch
     # where metric unit as MetricUnit key e.g. "Seconds", "BytesPerSecond"
     for unit in MetricUnit:
@@ -344,9 +316,8 @@ def test_all_possible_metric_units(metric, dimension, namespace):
     # WHEN we iterate over all available metric unit keys from MetricUnit enum
     all_metric_units = [unit.value for unit in MetricUnit]
 
-    # metric unit as MetricUnit value e.g. "Seconds", "Bytes/Second"
     for unit in all_metric_units:
-        metric["unit"] = unit
+        metric["unit"] = unit  # e.g. "Seconds", "Bytes/Second"
         # THEN we raise no MetricUnitError nor SchemaValidationError
         with single_metric(namespace=namespace, **metric) as my_metric:
             my_metric.add_dimension(**dimension)
@@ -364,17 +335,15 @@ def test_metrics_reuse_metric_set(metric, dimension, namespace):
     assert my_metrics_2.metric_set == my_metrics.metric_set
 
 
-def test_log_metrics_clear_metrics_after_invocation(metric, dimension, namespace):
+def test_log_metrics_clear_metrics_after_invocation(metric, service, namespace):
     # GIVEN Metrics is initialized
-    my_metrics = Metrics(namespace=namespace)
-
+    my_metrics = Metrics(service=service, namespace=namespace)
     my_metrics.add_metric(**metric)
-    my_metrics.add_dimension(**dimension)
 
     # WHEN log_metrics is used to flush metrics from memory
     @my_metrics.log_metrics
     def lambda_handler(evt, context):
-        return True
+        pass
 
     lambda_handler({}, {})
 
@@ -382,11 +351,10 @@ def lambda_handler(evt, context):
     assert my_metrics.metric_set == {}
 
 
-def test_log_metrics_non_string_dimension_values(capsys, metrics, non_str_dimensions, namespace):
+def test_log_metrics_non_string_dimension_values(capsys, service, metric, non_str_dimensions, namespace):
     # GIVEN Metrics is initialized and dimensions with non-string values are added
-    my_metrics = Metrics(namespace=namespace)
-    for metric in metrics:
-        my_metrics.add_metric(**metric)
+    my_metrics = Metrics(service=service, namespace=namespace)
+    my_metrics.add_metric(**metric)
     for dimension in non_str_dimensions:
         my_metrics.add_dimension(**dimension)
 
@@ -394,179 +362,137 @@ def test_log_metrics_non_string_dimension_values(capsys, metrics, non_str_dimens
     # and flush all metrics at the end of a function execution
     @my_metrics.log_metrics
     def lambda_handler(evt, ctx):
-        return True
+        pass
 
     lambda_handler({}, {})
-    output = json.loads(capsys.readouterr().out.strip())
+    output = capture_metrics_output(capsys)
 
     # THEN we should have no exceptions
-    # and dimension values hould be serialized as strings
+    # and dimension values should be serialized as strings
    for dimension in non_str_dimensions:
         assert isinstance(output[dimension["name"]], str)
 
 
-def test_log_metrics_with_explicit_namespace(capsys, metrics, dimensions, namespace):
-    # GIVEN Metrics is initialized with service specified
-    my_metrics = Metrics(service="test_service", namespace=namespace)
-    for metric in metrics:
-        my_metrics.add_metric(**metric)
-    for dimension in dimensions:
-        my_metrics.add_dimension(**dimension)
+def test_log_metrics_with_explicit_namespace(capsys, metric, service, namespace):
+    # GIVEN Metrics is initialized with explicit namespace
+    my_metrics = Metrics(service=service, namespace=namespace)
+    my_metrics.add_metric(**metric)
 
     # WHEN we utilize log_metrics to serialize
     # and flush all metrics at the end of a function execution
     @my_metrics.log_metrics
     def lambda_handler(evt, ctx):
-        return True
+        pass
 
     lambda_handler({}, {})
-
-    output = json.loads(capsys.readouterr().out.strip())
-
-    dimensions.append({"name": "service", "value": "test_service"})
-    expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace)
-
-    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
-    # THEN we should have no exceptions and the namespace should be set to the name provided in the
-    # service passed to Metrics constructor
-    assert expected == output
+    output = capture_metrics_output(capsys)
 
+    # THEN we should have no exceptions and the namespace should be set
+    # using the namespace value passed to Metrics constructor
+    assert namespace == output["_aws"]["CloudWatchMetrics"][0]["Namespace"]
 
-def test_log_metrics_with_implicit_dimensions(capsys, metrics, namespace):
+
+def test_log_metrics_with_implicit_dimensions(capsys, metric, namespace, service):
     # GIVEN Metrics is initialized with service specified
-    my_metrics = Metrics(service="test_service", namespace=namespace)
-    for metric in metrics:
-        my_metrics.add_metric(**metric)
+    my_metrics = Metrics(service=service, namespace=namespace)
+    my_metrics.add_metric(**metric)
 
     # WHEN we utilize log_metrics to serialize and don't explicitly add any dimensions
     @my_metrics.log_metrics
     def lambda_handler(evt, ctx):
-        return True
+        pass
 
     lambda_handler({}, {})
-
-    output = json.loads(capsys.readouterr().out.strip())
-
-    expected_dimensions = [{"name": "service", "value": "test_service"}]
-    expected = serialize_metrics(metrics=metrics, dimensions=expected_dimensions, namespace=namespace)
-
-    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
+    output = capture_metrics_output(capsys)
 
     # THEN we should have no exceptions and the dimensions should be set to the name provided in the
     # service passed to Metrics constructor
-    assert expected == output
+    assert service == output["service"]
 
 
-def test_log_metrics_with_renamed_service(capsys, metrics, metric):
+def test_log_metrics_with_renamed_service(capsys, metric, service):
     # GIVEN Metrics is initialized with service specified
-    my_metrics = Metrics(service="test_service", namespace="test_application")
-    for metric in metrics:
-        my_metrics.add_metric(**metric)
+    my_metrics = Metrics(service=service, namespace="test_application")
+    another_service_dimension = {"name": "service", "value": "another_test_service"}
 
     @my_metrics.log_metrics
     def lambda_handler(evt, ctx):
         # WHEN we manually call add_dimension to change the value of the service dimension
-        my_metrics.add_dimension(name="service", value="another_test_service")
+        my_metrics.add_dimension(**another_service_dimension)
         my_metrics.add_metric(**metric)
-        return True
 
     lambda_handler({}, {})
+    output = capture_metrics_output(capsys)
 
-    output = json.loads(capsys.readouterr().out.strip())
     lambda_handler({}, {})
-    second_output = json.loads(capsys.readouterr().out.strip())
-
-    remove_timestamp(metrics=[output])  # Timestamp will always be different
+    second_output = capture_metrics_output(capsys)
 
     # THEN we should have no exceptions and the dimensions should be set to the name provided in the
     # add_dimension call
-    assert output["service"] == "another_test_service"
-    assert second_output["service"] == "another_test_service"
-
-
-def test_single_metric_with_service(capsys, metric, dimension, namespace):
-    # GIVEN we pass namespace parameter to single_metric
-
-    # WHEN creating a metric
-    with single_metric(**metric, namespace=namespace) as my_metrics:
-        my_metrics.add_dimension(**dimension)
-
-    output = json.loads(capsys.readouterr().out.strip())
-    expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace)
-
-    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
-
-    # THEN namespace should match value passed as service
-    assert expected["_aws"] == output["_aws"]
+    assert output["service"] == another_service_dimension["value"]
+    assert second_output["service"] == another_service_dimension["value"]
 
 
 def test_namespace_var_precedence(monkeypatch, capsys, metric, dimension, namespace):
     # GIVEN we use POWERTOOLS_METRICS_NAMESPACE
-    monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace)
+    monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", "a_namespace")
 
     # WHEN creating a metric and explicitly set a namespace
     with single_metric(namespace=namespace, **metric) as my_metrics:
         my_metrics.add_dimension(**dimension)
-    monkeypatch.delenv("POWERTOOLS_METRICS_NAMESPACE")
 
-    output = json.loads(capsys.readouterr().out.strip())
-    expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace)
-
-    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
+    output = capture_metrics_output(capsys)
 
     # THEN namespace should match the explicitly passed variable and not the env var
-    assert expected["_aws"] == output["_aws"]
+    assert namespace == output["_aws"]["CloudWatchMetrics"][0]["Namespace"]
 
 
-def test_emit_cold_start_metric(capsys, namespace):
+def test_log_metrics_capture_cold_start_metric(capsys, namespace, service):
     # GIVEN Metrics is initialized
-    my_metrics = Metrics(service="test_service", namespace=namespace)
+    my_metrics = Metrics(service=service, namespace=namespace)
 
     # WHEN log_metrics is used with capture_cold_start_metric
     @my_metrics.log_metrics(capture_cold_start_metric=True)
     def lambda_handler(evt, context):
-        return True
+        pass
 
     LambdaContext = namedtuple("LambdaContext", "function_name")
     lambda_handler({}, LambdaContext("example_fn"))
 
-    output = json.loads(capsys.readouterr().out.strip())
+    output = capture_metrics_output(capsys)
 
     # THEN ColdStart metric and function_name dimension should be logged
     assert output["ColdStart"] == 1
     assert output["function_name"] == "example_fn"
 
 
-def test_emit_cold_start_metric_only_once(capsys, namespace, dimension, metric):
+def test_emit_cold_start_metric_only_once(capsys, namespace, service, metric):
     # GIVEN Metrics is initialized
-    my_metrics = Metrics(namespace=namespace)
+    my_metrics = Metrics(service=service, namespace=namespace)
 
     # WHEN log_metrics is used with capture_cold_start_metric
     # and handler is called more than once
     @my_metrics.log_metrics(capture_cold_start_metric=True)
     def lambda_handler(evt, context):
         my_metrics.add_metric(**metric)
-        my_metrics.add_dimension(**dimension)
 
     LambdaContext = namedtuple("LambdaContext", "function_name")
     lambda_handler({}, LambdaContext("example_fn"))
-    capsys.readouterr().out.strip()
+    _ = capture_metrics_output(capsys)  # ignore first stdout captured
 
-    # THEN ColdStart metric and function_name dimension should be logged
-    # only once
+    # THEN ColdStart metric and function_name dimension should be logged once
     lambda_handler({}, LambdaContext("example_fn"))
-
-    output = json.loads(capsys.readouterr().out.strip())
+    output = capture_metrics_output(capsys)
 
     assert "ColdStart" not in output
-
     assert "function_name" not in output
 
 
-def test_log_metrics_decorator_no_metrics(dimensions, namespace):
+def test_log_metrics_decorator_no_metrics_warning(dimensions, namespace, service):
     # GIVEN Metrics is initialized
-    my_metrics = Metrics(namespace=namespace, service="test_service")
+    my_metrics = Metrics(namespace=namespace, service=service)
 
     # WHEN using the log_metrics decorator and no metrics have been added
     @my_metrics.log_metrics
@@ -580,28 +506,22 @@ def lambda_handler(evt, context):
 
     assert str(w[-1].message) == "No metrics to publish, skipping"
 
 
-def test_log_metrics_with_implicit_dimensions_called_twice(capsys, metrics, namespace):
+def test_log_metrics_with_implicit_dimensions_called_twice(capsys, metric, namespace, service):
     # GIVEN Metrics is initialized with service specified
-    my_metrics = Metrics(service="test_service", namespace=namespace)
+    my_metrics = Metrics(service=service, namespace=namespace)
 
     # WHEN we utilize log_metrics to serialize and don't explicitly add any dimensions,
     # and the lambda function is called more than once
     @my_metrics.log_metrics
     def lambda_handler(evt, ctx):
-        for metric in metrics:
-            my_metrics.add_metric(**metric)
+        my_metrics.add_metric(**metric)
         return True
 
     lambda_handler({}, {})
-    output = json.loads(capsys.readouterr().out.strip())
+    output = capture_metrics_output(capsys)
 
     lambda_handler({}, {})
-    second_output = json.loads(capsys.readouterr().out.strip())
-
-    expected_dimensions = [{"name": "service", "value": "test_service"}]
-    expected = serialize_metrics(metrics=metrics, dimensions=expected_dimensions, namespace=namespace)
-
-    remove_timestamp(metrics=[output, expected, second_output])  # Timestamp will always be different
+    second_output = capture_metrics_output(capsys)
 
     # THEN we should have no exceptions and the dimensions should be set to the name provided in the
     # service passed to Metrics constructor
diff --git a/tests/functional/test_tracing.py b/tests/functional/test_tracing.py
index 6cff182015e..cda0a85cc4d 100644
--- a/tests/functional/test_tracing.py
+++ b/tests/functional/test_tracing.py
@@ -14,120 +14,125 @@ def reset_tracing_config():
     yield
 
 
+@pytest.fixture
+def service_name():
+    return "booking"
+
+
 def test_capture_lambda_handler(dummy_response):
-    # GIVEN tracer is disabled, and decorator is used
-    # WHEN a lambda handler is run
-    # THEN tracer should not raise an Exception
+    # GIVEN tracer lambda handler decorator is used
     tracer = Tracer(disabled=True)
 
+    # WHEN a lambda handler is run
     @tracer.capture_lambda_handler
     def handler(event, context):
         return dummy_response
 
+    # THEN tracer should not raise an Exception
     handler({}, {})
 
 
 def test_capture_method(dummy_response):
-    # GIVEN tracer is disabled, and method decorator is used
-    # WHEN a function is run
-    # THEN tracer should not raise an Exception
-
+    # GIVEN tracer method decorator is used
     tracer = Tracer(disabled=True)
 
+    # WHEN a function is run
     @tracer.capture_method
     def greeting(name, message):
         return dummy_response
 
+    # THEN tracer should not raise an Exception
     greeting(name="Foo", message="Bar")
 
 
 def test_tracer_lambda_emulator(monkeypatch, dummy_response):
-    # GIVEN tracer is run locally
-    # WHEN a lambda function is run through SAM CLI
-    # THEN tracer should not raise an Exception
+    # GIVEN tracer runs locally
    monkeypatch.setenv("AWS_SAM_LOCAL", "true")
     tracer = Tracer()
 
+    # WHEN a lambda function is run through SAM CLI
     @tracer.capture_lambda_handler
     def handler(event, context):
         return dummy_response
 
+    # THEN tracer should run in disabled mode, and not raise an Exception
     handler({}, {})
-    monkeypatch.delenv("AWS_SAM_LOCAL")
 
 
 def test_tracer_metadata_disabled(dummy_response):
     # GIVEN tracer is disabled, and annotations/metadata are used
-    # WHEN a lambda handler is run
-    # THEN tracer should not raise an Exception and simply ignore
     tracer = Tracer(disabled=True)
 
+    # WHEN a lambda handler is run
     @tracer.capture_lambda_handler
     def handler(event, context):
         tracer.put_annotation("PaymentStatus", "SUCCESS")
         tracer.put_metadata("PaymentMetadata", "Metadata")
         return dummy_response
 
+    # THEN tracer should not raise any Exception
     handler({}, {})
 
 
-def test_tracer_env_vars(monkeypatch):
-    # GIVEN tracer disabled, is run without parameters
-    # WHEN service is explicitly defined
-    # THEN tracer should have use that service name
-    service_name = "booking"
+def test_tracer_service_env_var(monkeypatch, service_name):
+    # GIVEN tracer is run without parameters
+    # WHEN service is implicitly defined via env var
     monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", service_name)
-    tracer_env_var = Tracer(disabled=True)
+    tracer = Tracer(disabled=True)
+
+    # THEN tracer should use that service name
+    assert tracer.service == service_name
 
-    assert tracer_env_var.service == service_name
 
+def test_tracer_explicit_service(monkeypatch, service_name):
+    # GIVEN tracer is disabled
+    # WHEN service is explicitly defined
     tracer_explicit = Tracer(disabled=True, service=service_name)
     assert tracer_explicit.service == service_name
 
     monkeypatch.setenv("POWERTOOLS_TRACE_DISABLED", "true")
     tracer = Tracer()
-    assert bool(tracer.disabled) is True
 
+    # THEN tracer should use that service name
+    assert tracer.service == service_name
 
-def test_tracer_with_exception(mocker):
-    # GIVEN tracer is disabled, decorator is used
-    # WHEN a lambda handler or method returns an Exception
-    # THEN tracer should reraise the same Exception
+
+def test_tracer_propagate_exception(mocker):
+    # GIVEN tracer decorator is used
     class CustomException(Exception):
         pass
 
     tracer = Tracer(disabled=True)
 
+    # WHEN a lambda handler or method returns an Exception
     @tracer.capture_lambda_handler
     def handler(event, context):
         raise CustomException("test")
 
     @tracer.capture_method
-    def greeting(name, message):
+    def greeting():
         raise CustomException("test")
 
+    # THEN tracer should reraise the same Exception
     with pytest.raises(CustomException):
         handler({}, {})
 
     with pytest.raises(CustomException):
-        greeting(name="Foo", message="Bar")
+        greeting()
 
 
-def test_tracer_reuse():
-    # GIVEN tracer A, B were initialized
-    # WHEN tracer B explicitly reuses A config
-    # THEN tracer B attributes should be equal to tracer A
-    service_name = "booking"
+def test_tracer_reuse_configuration(service_name):
+    # GIVEN tracer A is initialized
     tracer_a = Tracer(disabled=True, service=service_name)
+
+    # WHEN tracer B is initialized afterwards
     tracer_b = Tracer()
 
-    assert id(tracer_a) != id(tracer_b)
+    # THEN tracer B attributes should be equal to tracer A
     assert tracer_a.__dict__.items() == tracer_b.__dict__.items()
 
 
 def test_tracer_method_nested_sync(mocker):
-    # GIVEN tracer is disabled, decorator is used
+    # GIVEN tracer decorator is used
     # WHEN multiple sync functions are nested
     # THEN tracer should not raise a Runtime Error
     tracer = Tracer(disabled=True)
diff --git a/tests/unit/test_tracing.py b/tests/unit/test_tracing.py
index 65cb04c997b..d1e5408bb77 100644
--- a/tests/unit/test_tracing.py
+++ b/tests/unit/test_tracing.py
@@ -79,15 +79,20 @@ class In_subsegment(NamedTuple):
 
 
 def test_tracer_lambda_handler(mocker, dummy_response, provider_stub, in_subsegment_mock):
+    # GIVEN Tracer is initialized with booking as the service name
     provider = provider_stub(in_subsegment=in_subsegment_mock.in_subsegment)
     tracer = Tracer(provider=provider, service="booking")
 
+    # WHEN lambda_handler decorator is used
     @tracer.capture_lambda_handler
     def handler(event, context):
         return dummy_response
 
     handler({}, mocker.MagicMock())
 
+    # THEN we should have a subsegment named after the handler,
+    # annotate cold start, and add its response as trace metadata
+    # and use service name as a metadata namespace
     assert in_subsegment_mock.in_subsegment.call_count == 1
     assert in_subsegment_mock.in_subsegment.call_args == mocker.call(name="## handler")
     assert in_subsegment_mock.put_metadata.call_args == mocker.call(
@@ -98,19 +103,39 @@ def handler(event, context):
 
 
 def test_tracer_method(mocker, dummy_response, provider_stub, in_subsegment_mock):
+    # GIVEN Tracer is initialized with booking as the service name
     provider = provider_stub(in_subsegment=in_subsegment_mock.in_subsegment)
-    Tracer(provider=provider, service="booking")
+    tracer = Tracer(provider=provider, service="booking")
+
+    # WHEN capture_method decorator is used
+    @tracer.capture_method
+    def greeting(name, message):
+        return dummy_response
+
+    greeting(name="Foo", message="Bar")
+
+    # THEN we should have a subsegment named after the method name
+    # and add its response as trace metadata
+    # and use service name as a metadata namespace
+    assert in_subsegment_mock.in_subsegment.call_count == 1
+    assert in_subsegment_mock.in_subsegment.call_args == mocker.call(name="## greeting")
+    assert in_subsegment_mock.put_metadata.call_args == mocker.call(
+        key="greeting response", value=dummy_response, namespace="booking"
+    )
 
 
 def test_tracer_custom_metadata(mocker, dummy_response, provider_stub):
+    # GIVEN Tracer is initialized with booking as the service name
     put_metadata_mock = mocker.MagicMock()
-    annotation_key = "Booking response"
-    annotation_value = {"bookingStatus": "CONFIRMED"}
-
     provider = provider_stub(put_metadata_mock=put_metadata_mock)
     tracer = Tracer(provider=provider, service="booking")
+
+    # WHEN put_metadata is used
+    annotation_key = "Booking response"
+    annotation_value = {"bookingStatus": "CONFIRMED"}
     tracer.put_metadata(annotation_key, annotation_value)
 
+    # THEN we should have the expected metadata and booking as namespace
     assert put_metadata_mock.call_count == 1
     assert put_metadata_mock.call_args_list[0] == mocker.call(
         key=annotation_key, value=annotation_value, namespace="booking"
@@ -118,87 +143,97 @@ def test_tracer_custom_metadata(mocker, dummy_response, provider_stub):
 
 
 def test_tracer_custom_annotation(mocker, dummy_response, provider_stub):
+    # GIVEN Tracer is initialized
     put_annotation_mock = mocker.MagicMock()
-    annotation_key = "BookingId"
-    annotation_value = "123456"
-
     provider = provider_stub(put_annotation_mock=put_annotation_mock)
-    tracer = Tracer(provider=provider, service="booking")
+    tracer = Tracer(provider=provider)
 
+    # WHEN put_annotation is used
+    annotation_key = "BookingId"
+    annotation_value = "123456"
     tracer.put_annotation(annotation_key, annotation_value)
 
+    # THEN we should have an annotation as expected
     assert put_annotation_mock.call_count == 1
     assert put_annotation_mock.call_args == mocker.call(key=annotation_key, value=annotation_value)
 
 
 @mock.patch("aws_lambda_powertools.tracing.Tracer.patch")
 def test_tracer_autopatch(patch_mock):
-    # GIVEN tracer is instantiated
-    # WHEN default options were used, or patch() was called
-    # THEN tracer should patch all modules
+    # GIVEN tracer is initialized
+    # WHEN auto_patch hasn't been explicitly disabled
     Tracer(disabled=True)
+
+    # THEN tracer should patch all modules
     assert patch_mock.call_count == 1
 
 
 @mock.patch("aws_lambda_powertools.tracing.Tracer.patch")
 def test_tracer_no_autopatch(patch_mock):
-    # GIVEN tracer is instantiated
+    # GIVEN tracer is initialized
     # WHEN auto_patch is disabled
-    # THEN tracer should not patch any module
     Tracer(disabled=True, auto_patch=False)
+
+    # THEN tracer should not patch any module
     assert patch_mock.call_count == 0
 
 
-def test_tracer_lambda_handler_empty_response_metadata(mocker, provider_stub):
+def test_tracer_lambda_handler_does_not_add_empty_response_as_metadata(mocker, provider_stub):
+    # GIVEN tracer is initialized
     put_metadata_mock = mocker.MagicMock()
     provider = provider_stub(put_metadata_mock=put_metadata_mock)
     tracer = Tracer(provider=provider)
 
+    # WHEN capture_lambda_handler decorator is used
+    # and the handler response is empty
     @tracer.capture_lambda_handler
     def handler(event, context):
         return
 
     handler({}, mocker.MagicMock())
 
+    # THEN we should not add empty metadata
     assert put_metadata_mock.call_count == 0
 
 
-def test_tracer_method_empty_response_metadata(mocker, provider_stub):
+def test_tracer_method_does_not_add_empty_response_as_metadata(mocker, provider_stub):
+    # GIVEN tracer is initialized
     put_metadata_mock = mocker.MagicMock()
     provider = provider_stub(put_metadata_mock=put_metadata_mock)
     tracer = Tracer(provider=provider)
 
+    # WHEN capture_method decorator is used
+    # and the method response is empty
     @tracer.capture_method
     def greeting(name, message):
         return
 
     greeting(name="Foo", message="Bar")
 
+    # THEN we should not add empty metadata
     assert put_metadata_mock.call_count == 0
 
 
 @mock.patch("aws_lambda_powertools.tracing.tracer.aws_xray_sdk.core.patch")
-@mock.patch("aws_lambda_powertools.tracing.tracer.aws_xray_sdk.core.patch_all")
-def test_tracer_patch(xray_patch_all_mock, xray_patch_mock, mocker):
-    # GIVEN tracer is instantiated
-    # WHEN default X-Ray provider client is mocked
-    # THEN tracer should run just fine
-
-    Tracer()
-    assert xray_patch_all_mock.call_count == 1
-
+def test_tracer_patch_modules(xray_patch_mock, mocker):
+    # GIVEN tracer is initialized with a list of modules to patch
     modules = ["boto3"]
+
+    # WHEN modules are supported by X-Ray
     Tracer(service="booking", patch_modules=modules)
 
+    # THEN tracer should patch the provided modules
     assert xray_patch_mock.call_count == 1
     assert xray_patch_mock.call_args == mocker.call(modules)
 
 
 def test_tracer_method_exception_metadata(mocker, provider_stub, in_subsegment_mock):
-
+    # GIVEN tracer is initialized
     provider = provider_stub(in_subsegment=in_subsegment_mock.in_subsegment)
     tracer = Tracer(provider=provider, service="booking")
 
+    # WHEN capture_method decorator is used
+    # and the method raises an exception
     @tracer.capture_method
     def greeting(name, message):
         raise ValueError("test")
@@ -206,16 +241,20 @@ def greeting(name, message):
     with pytest.raises(ValueError):
         greeting(name="Foo", message="Bar")
 
+    # THEN we should add the exception using the method name plus error as the key
+    # and the service name as the namespace
     put_metadata_mock_args = in_subsegment_mock.put_metadata.call_args[1]
     assert put_metadata_mock_args["key"] == "greeting error"
     assert put_metadata_mock_args["namespace"] == "booking"
 
 
 def test_tracer_lambda_handler_exception_metadata(mocker, provider_stub, in_subsegment_mock):
-
+    # GIVEN tracer is initialized
     provider = provider_stub(in_subsegment=in_subsegment_mock.in_subsegment)
     tracer = Tracer(provider=provider, service="booking")
 
+    # WHEN capture_lambda_handler decorator is used
+    # and the handler raises an exception
     @tracer.capture_lambda_handler
     def handler(event, context):
         raise ValueError("test")
@@ -223,16 +262,21 @@ def handler(event, context):
     with pytest.raises(ValueError):
         handler({}, mocker.MagicMock())
 
+    # THEN we should add the exception using the handler name plus error as the key
+    # and the service name as the namespace
     put_metadata_mock_args = in_subsegment_mock.put_metadata.call_args[1]
-    assert put_metadata_mock_args["key"] == "booking error"
+    assert put_metadata_mock_args["key"] == "handler error"
+    assert put_metadata_mock_args["namespace"] == "booking"
 
 
 @pytest.mark.asyncio
 async def test_tracer_method_nested_async(mocker, dummy_response, provider_stub, in_subsegment_mock):
+    # GIVEN tracer is initialized
     provider = provider_stub(in_subsegment_async=in_subsegment_mock.in_subsegment)
     tracer = Tracer(provider=provider, service="booking")
 
+    # WHEN capture_method decorator is used for nested async methods
     @tracer.capture_method
     async def greeting_2(name, message):
         return dummy_response
@@ -250,6 +294,7 @@ async def greeting(name, message):
     ) = in_subsegment_mock.in_subsegment.call_args_list
     put_metadata_greeting2_call_args, put_metadata_greeting_call_args = in_subsegment_mock.put_metadata.call_args_list
 
+    # THEN we should add metadata for each response like we would for a sync decorated method
     assert in_subsegment_mock.in_subsegment.call_count == 2
     assert in_subsegment_greeting_call_args == mocker.call(name="## greeting")
     assert in_subsegment_greeting2_call_args == mocker.call(name="## greeting_2")
@@ -265,9 +310,10 @@ async def greeting(name, message):
 
 @pytest.mark.asyncio
 async def test_tracer_method_nested_async_disabled(dummy_response):
-
+    # GIVEN tracer is initialized and explicitly disabled
     tracer = Tracer(service="booking", disabled=True)
 
+    # WHEN capture_method decorator is used
     @tracer.capture_method
     async def greeting_2(name, message):
         return dummy_response
@@ -277,16 +323,19 @@ async def greeting(name, message):
         await greeting_2(name, message)
         return dummy_response
 
+    # THEN we should run the decorated methods without side effects
     ret = await greeting(name="Foo", message="Bar")
-
     assert ret == dummy_response
 
 
 @pytest.mark.asyncio
 async def test_tracer_method_exception_metadata_async(mocker, provider_stub, in_subsegment_mock):
+    # GIVEN tracer is initialized
     provider = provider_stub(in_subsegment_async=in_subsegment_mock.in_subsegment)
     tracer = Tracer(provider=provider, service="booking")
 
+    # WHEN capture_method decorator is used in an async method
+    # and the method raises an exception
     @tracer.capture_method
     async def greeting(name, message):
         raise ValueError("test")
@@ -294,6 +343,8 @@ async def greeting(name, message):
     with pytest.raises(ValueError):
         await greeting(name="Foo", message="Bar")
 
+    # THEN we should add the exception using the method name plus error as the key
+    # and the service name as the namespace
     put_metadata_mock_args = in_subsegment_mock.put_metadata.call_args[1]
     assert put_metadata_mock_args["key"] == "greeting error"
     assert put_metadata_mock_args["namespace"] == "booking"