Skip to content

Commit 24f6187

Browse files
author
Tom McCarthy
committed
Merge branch 'develop' into feat/idempotency_helper
2 parents 834db1c + 7b23b5b commit 24f6187

26 files changed

+1143
-1905
lines changed

CHANGELOG.md

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
66

77
## [Unreleased]
88

9+
## [1.10.2] - 2021-02-04
10+
11+
### Fixed
12+
13+
* **Utilities**: Correctly handle and list multiple exceptions in SQS batch processing utility.
14+
* **Docs**: Fix typos on AppConfig docstring import, and `SnsModel` typo in parser.
15+
* **Utilities**: `typing_extensions` package is now only installed in Python < 3.8
16+
917
## [1.10.1] - 2021-01-19
1018

1119
### Fixed

aws_lambda_powertools/tracing/tracer.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,10 @@
1414

1515
is_cold_start = True
1616
logger = logging.getLogger(__name__)
17+
# Set the streaming threshold to 0 on the default recorder to force sending
18+
# subsegments individually, rather than batching them.
19+
# See https://github.com/awslabs/aws-lambda-powertools-python/issues/283
20+
aws_xray_sdk.core.xray_recorder.configure(streaming_threshold=0)
1721

1822

1923
class Tracer:

aws_lambda_powertools/utilities/batch/base.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ def success_handler(self, record: Any, result: Any):
8585
self.success_messages.append(record)
8686
return entry
8787

88-
def failure_handler(self, record: Any, exception: Exception):
88+
def failure_handler(self, record: Any, exception: Tuple):
8989
"""
9090
Failure callback
9191
@@ -94,8 +94,9 @@ def failure_handler(self, record: Any, exception: Exception):
9494
tuple
9595
"fail", exceptions args, original record
9696
"""
97-
entry = ("fail", exception.args, record)
98-
logger.debug(f"Record processing exception: {exception}")
97+
exception_string = f"{exception[0]}:{exception[1]}"
98+
entry = ("fail", exception_string, record)
99+
logger.debug(f"Record processing exception: {exception_string}")
99100
self.exceptions.append(exception)
100101
self.fail_messages.append(record)
101102
return entry
Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,25 @@
11
"""
22
Batch processing exceptions
33
"""
4+
import traceback
45

56

67
class SQSBatchProcessingError(Exception):
78
"""When at least one message within a batch could not be processed"""
9+
10+
def __init__(self, msg="", child_exceptions=()):
11+
super().__init__(msg)
12+
self.msg = msg
13+
self.child_exceptions = child_exceptions
14+
15+
# Overriding this method so we can output all child exception tracebacks when we raise this exception to prevent
16+
# errors being lost. See https://github.com/awslabs/aws-lambda-powertools-python/issues/275
17+
def __str__(self):
18+
parent_exception_str = super(SQSBatchProcessingError, self).__str__()
19+
exception_list = [f"{parent_exception_str}\n"]
20+
for exception in self.child_exceptions:
21+
extype, ex, tb = exception
22+
formatted = "".join(traceback.format_exception(extype, ex, tb))
23+
exception_list.append(formatted)
24+
25+
return "\n".join(exception_list)

aws_lambda_powertools/utilities/batch/sqs.py

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
Batch SQS utilities
55
"""
66
import logging
7+
import sys
78
from typing import Callable, Dict, List, Optional, Tuple
89

910
import boto3
@@ -90,10 +91,10 @@ def _process_record(self, record) -> Tuple:
9091
An object to be processed.
9192
"""
9293
try:
93-
result = self.handler(record)
94-
return self.success_handler(record, result)
95-
except Exception as exc:
96-
return self.failure_handler(record, exc)
94+
result = self.handler(record=record)
95+
return self.success_handler(record=record, result=result)
96+
except Exception:
97+
return self.failure_handler(record=record, exception=sys.exc_info())
9798

9899
def _prepare(self):
99100
"""
@@ -123,7 +124,11 @@ def _clean(self):
123124
logger.debug(f"{len(self.fail_messages)} records failed processing, but exceptions are suppressed")
124125
else:
125126
logger.debug(f"{len(self.fail_messages)} records failed processing, raising exception")
126-
raise SQSBatchProcessingError(list(self.exceptions))
127+
raise SQSBatchProcessingError(
128+
msg=f"Not all records processed succesfully. {len(self.exceptions)} individual errors logged "
129+
f"separately below.",
130+
child_exceptions=self.exceptions,
131+
)
127132

128133
return delete_message_response
129134

aws_lambda_powertools/utilities/parser/models/dynamodb.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,8 @@
22
from typing import Any, Dict, List, Optional
33

44
from pydantic import BaseModel
5-
from typing_extensions import Literal
5+
6+
from ..types import Literal
67

78

89
class DynamoDBStreamChangedRecordModel(BaseModel):

aws_lambda_powertools/utilities/parser/models/kinesis.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,8 @@
55

66
from pydantic import BaseModel, validator
77
from pydantic.types import PositiveInt
8-
from typing_extensions import Literal
8+
9+
from ..types import Literal
910

1011
logger = logging.getLogger(__name__)
1112

aws_lambda_powertools/utilities/parser/models/s3.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,8 @@
55
from pydantic.fields import Field
66
from pydantic.networks import IPvAnyNetwork
77
from pydantic.types import PositiveInt
8-
from typing_extensions import Literal
8+
9+
from ..types import Literal
910

1011

1112
class S3EventRecordGlacierRestoreEventData(BaseModel):

aws_lambda_powertools/utilities/parser/models/ses.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,8 @@
44
from pydantic import BaseModel, Field
55
from pydantic.networks import EmailStr
66
from pydantic.types import PositiveInt
7-
from typing_extensions import Literal
7+
8+
from ..types import Literal
89

910

1011
class SesReceiptVerdict(BaseModel):

aws_lambda_powertools/utilities/parser/models/sns.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,8 @@
33

44
from pydantic import BaseModel, root_validator
55
from pydantic.networks import HttpUrl
6-
from typing_extensions import Literal
6+
7+
from ..types import Literal
78

89

910
class SnsMsgAttributeModel(BaseModel):

aws_lambda_powertools/utilities/parser/models/sqs.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,8 @@
22
from typing import Dict, List, Optional
33

44
from pydantic import BaseModel
5-
from typing_extensions import Literal
5+
6+
from ..types import Literal
67

78

89
class SqsAttributesModel(BaseModel):
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,13 @@
11
"""Generics and other shared types used across parser"""
2+
23
from typing import TypeVar
34

45
from pydantic import BaseModel
56

7+
# We only need typing_extensions for python versions <3.8
8+
try:
9+
from typing import Literal # noqa: F401
10+
except ImportError:
11+
from typing_extensions import Literal # noqa: F401
12+
613
Model = TypeVar("Model", bound=BaseModel)

benchmark/.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
.aws-sam

benchmark/README.md

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
# Cold Start Benchmark
2+
3+
The [benchmark.sh script](./benchmark.sh) is a bash script to compare the cold-start time of using the AWS Lambda Powertools in a semi-automated way. It does so by deploying two Lambda functions which both have the aws-lambda-powertools module installed. One Lambda function will import and initialize the three core utilities (`Metrics`, `Logger`, `Tracer`), while the other one will not.
4+
5+
Please note that this requires the [SAM CLI](https://github.com/aws/aws-sam-cli) version 1.2.0 or later.
6+
7+
## Usage
8+
9+
> **NOTE**: This script is expected to run in Unix-based systems only, and can incur charges on your AWS account.
10+
11+
To use the script, you should move into the benchmark folder and run the benchmark script:
12+
13+
```
14+
export S3_BUCKET=code-artifact-s3-bucket
15+
16+
cd benchmark
17+
./benchmark.sh
18+
```
19+
20+
This will:
21+
22+
* Deploy a CloudFormation stack using guided SAM deployment (*you will need to answer a few questions*).
23+
* Run loops to update the memory setting of the functions to force a cold start, then invoke them. This process is repeated a number of times to get more consistent results.
24+
* Wait 2.5 minutes to ensure data propagates from CloudWatch Logs to CloudWatch Logs Insights.
25+
* Run a query on CloudWatch Logs insights, looking at the **REPORT** line from the logs.
26+
* Delete the CloudFormation stack.

benchmark/benchmark.sh

Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,83 @@
1+
#!/bin/bash
2+
3+
set -e
4+
trap cleanup EXIT
5+
6+
if [ -z "S3_BUCKET" ]; then
7+
echo "Missing S3_BUCKET environment variabe"
8+
exit 1
9+
fi
10+
11+
export BENCHMARK_STACK_NAME=${BENCHMARK_STACK_NAME:-"powertools-benchmark"}
12+
13+
function cleanup {
14+
echo "Cleaning up stack..."
15+
aws cloudformation delete-stack --stack-name $BENCHMARK_STACK_NAME
16+
}
17+
18+
function run_function {
19+
# Update function to force a cold start
20+
aws lambda update-function-configuration --function-name $1 --memory-size 256 >/dev/null
21+
aws lambda update-function-configuration --function-name $1 --memory-size 128 >/dev/null
22+
# Cold-start invoke
23+
aws lambda invoke --function-name $1 --payload '{}' /dev/null >/dev/null && echo -n . || echo -n e
24+
}
25+
26+
# Retrieve statistics
27+
function get_stats {
28+
# Gather results from CloudWatch Logs Insights
29+
query_id=$(aws logs start-query --log-group-name $1 --query-string 'filter @type = "REPORT" | stats pct(@initDuration, 50) as init_duration, pct(@duration, 50) as duration' --start-time $(expr $(date +%s) - 86400) --end-time $(expr $(date +%s) + 0) --query 'queryId' --output text)
30+
while true; do
31+
result=$(aws logs get-query-results --query-id $query_id --query 'status' --output text)
32+
if [ $result == "Complete" ]; then
33+
break
34+
fi
35+
sleep 1
36+
done
37+
38+
# Check if greater than threshold and print result
39+
init_duration=$(aws logs get-query-results --query-id $query_id --query 'results[0][?field==`init_duration`].value' --output text)
40+
duration=$(aws logs get-query-results --query-id $query_id --query 'results[0][?field==`duration`].value' --output text)
41+
echo "$init_duration,$duration"
42+
}
43+
44+
# Build and deploy the benchmark stack
45+
echo "Building and deploying..."
46+
sam build
47+
sam deploy --stack-name $BENCHMARK_STACK_NAME --s3-bucket $S3_BUCKET --capabilities CAPABILITY_IAM
48+
49+
# Retrieve output values
50+
echo "Retrieve values..."
51+
export INSTRUMENTED_FUNCTION=$(aws cloudformation describe-stacks --stack-name $BENCHMARK_STACK_NAME --query 'Stacks[0].Outputs[?OutputKey==`InstrumentedFunction`].OutputValue' --output text)
52+
export REFERENCE_FUNCTION=$(aws cloudformation describe-stacks --stack-name $BENCHMARK_STACK_NAME --query 'Stacks[0].Outputs[?OutputKey==`ReferenceFunction`].OutputValue' --output text)
53+
export INSTRUMENTED_LOG_GROUP=$(aws cloudformation describe-stacks --stack-name $BENCHMARK_STACK_NAME --query 'Stacks[0].Outputs[?OutputKey==`InstrumentedLogGroup`].OutputValue' --output text)
54+
export REFERENCE_LOG_GROUP=$(aws cloudformation describe-stacks --stack-name $BENCHMARK_STACK_NAME --query 'Stacks[0].Outputs[?OutputKey==`ReferenceLogGroup`].OutputValue' --output text)
55+
56+
echo INSTRUMENTED_FUNCTION=$INSTRUMENTED_FUNCTION
57+
echo REFERENCE_FUNCTION=$REFERENCE_FUNCTION
58+
echo INSTRUMENTED_LOG_GROUP=$INSTRUMENTED_LOG_GROUP
59+
echo REFERENCE_LOG_GROUP=$REFERENCE_LOG_GROUP
60+
61+
# Running cold starts
62+
echo "Running functions..."
63+
for i in {0..20}; do
64+
run_function $INSTRUMENTED_FUNCTION
65+
done &
66+
process_id=$!
67+
for i in {0..20}; do
68+
run_function $REFERENCE_FUNCTION
69+
done &
70+
wait $process_id
71+
wait $!
72+
echo
73+
74+
# Gather statistics
75+
# Waiting 2.5 minutes to make sure the data propagates from CloudWatch Logs
76+
# into CloudWatch Logs Insights.
77+
echo "Waiting for data to propagate in CloudWatch Logs Insights..."
78+
sleep 150
79+
return_code=0
80+
echo "INSTRUMENTED=$(get_stats $INSTRUMENTED_LOG_GROUP)"
81+
echo "REFERENCE=$(get_stats $REFERENCE_LOG_GROUP)"
82+
83+
exit $return_code

benchmark/src/instrumented/main.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
from aws_lambda_powertools import (Logger, Metrics, Tracer)
2+
3+
4+
# Initialize core utilities
5+
logger = Logger()
6+
metrics = Metrics()
7+
tracer = Tracer()
8+
9+
10+
# Instrument Lambda function
11+
@logger.inject_lambda_context
12+
@metrics.log_metrics
13+
@tracer.capture_lambda_handler
14+
def handler(event, context):
15+
return {
16+
"message": "success"
17+
}
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
aws-lambda-powertools

benchmark/src/reference/main.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
def handler(event, context):
2+
return {
3+
"message": "success"
4+
}
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
aws-lambda-powertools

benchmark/template.yaml

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
1+
AWSTemplateFormatVersion: '2010-09-09'
2+
Transform: AWS::Serverless-2016-10-31
3+
4+
Globals:
5+
Function:
6+
Handler: main.handler
7+
Runtime: python3.8
8+
MemorySize: 128
9+
Tracing: Active
10+
Environment:
11+
Variables:
12+
POWERTOOLS_SERVICE_NAME: benchmark
13+
POWERTOOLS_METRICS_NAMESPACE: LambdaPowertools
14+
POWERTOOLS_LOGGER_LOG_EVENT: "true"
15+
LOG_LEVEL: INFO
16+
17+
Resources:
18+
InstrumentedFunction:
19+
Type: AWS::Serverless::Function
20+
Properties:
21+
CodeUri: ./src/instrumented/
22+
23+
ReferenceFunction:
24+
Type: AWS::Serverless::Function
25+
Properties:
26+
CodeUri: ./src/reference/
27+
28+
InstrumentedLogGroup:
29+
Type: AWS::Logs::LogGroup
30+
Properties:
31+
LogGroupName: !Sub "/aws/lambda/${InstrumentedFunction}"
32+
RetentionInDays: 7
33+
34+
ReferenceLogGroup:
35+
Type: AWS::Logs::LogGroup
36+
Properties:
37+
LogGroupName: !Sub "/aws/lambda/${ReferenceFunction}"
38+
RetentionInDays: 7
39+
40+
Outputs:
41+
InstrumentedFunction:
42+
Value: !Ref InstrumentedFunction
43+
ReferenceFunction:
44+
Value: !Ref ReferenceFunction
45+
InstrumentedLogGroup:
46+
Value: !Ref InstrumentedLogGroup
47+
ReferenceLogGroup:
48+
Value: !Ref ReferenceLogGroup

0 commit comments

Comments
 (0)