Commit 613755c

Merge pull request #48 from DataDog/stephenf/integration-tests
Add integration tests
2 parents: 6b3dab3 + f47d412

57 files changed: +1058 -21 lines changed

Large commits have some content hidden by default, so only a subset of the changed files is shown below.

.gitignore

Lines changed: 3 additions & 0 deletions
@@ -36,3 +36,6 @@ nosetests.xml
 .eggs/
 .env/
 .idea/
+
+
+**/.serverless/

datadog_lambda/patch.py

Lines changed: 60 additions & 21 deletions
@@ -3,6 +3,8 @@
 # This product includes software developed at Datadog (https://www.datadoghq.com/).
 # Copyright 2019 Datadog, Inc.

+import json
+import os
 import sys
 import logging

@@ -13,9 +15,9 @@
 logger = logging.getLogger(__name__)

 if sys.version_info >= (3, 0, 0):
-    httplib_module = 'http.client'
+    httplib_module = "http.client"
 else:
-    httplib_module = 'httplib'
+    httplib_module = "httplib"

 _httplib_patched = False
 _requests_patched = False
@@ -38,12 +40,9 @@ def _patch_httplib():
     global _httplib_patched
     if not _httplib_patched:
         _httplib_patched = True
-        wrap(
-            httplib_module,
-            'HTTPConnection.request',
-            _wrap_httplib_request
-        )
-        logger.debug('Patched %s', httplib_module)
+        wrap(httplib_module, "HTTPConnection.request", _wrap_httplib_request)
+
+    logger.debug("Patched %s", httplib_module)


 def _patch_requests():
@@ -55,14 +54,10 @@ def _patch_requests():
     if not _requests_patched:
         _requests_patched = True
         try:
-            wrap(
-                'requests',
-                'Session.request',
-                _wrap_requests_request
-            )
-            logger.debug('Patched requests')
+            wrap("requests", "Session.request", _wrap_requests_request)
+            logger.debug("Patched requests")
         except Exception:
-            logger.debug('Failed to patch requests', exc_info=True)
+            logger.debug("Failed to patch requests", exc_info=True)


 def _wrap_requests_request(func, instance, args, kwargs):
@@ -71,12 +66,17 @@ def _wrap_requests_request(func, instance, args, kwargs):
     into the outgoing requests.
     """
     context = get_dd_trace_context()
-    if 'headers' in kwargs:
-        kwargs['headers'].update(context)
+    if "headers" in kwargs:
+        kwargs["headers"].update(context)
     elif len(args) >= 5:
         args[4].update(context)
     else:
-        kwargs['headers'] = context
+        kwargs["headers"] = context
+
+    # If we're in an integration test, log the HTTP requests made
+    if os.environ.get("DD_INTEGRATION_TEST", "false").lower() == "true":
+        _print_request_string(args, kwargs)
+
     return func(*args, **kwargs)


@@ -86,10 +86,49 @@ def _wrap_httplib_request(func, instance, args, kwargs):
     the Datadog trace headers into the outgoing requests.
     """
     context = get_dd_trace_context()
-    if 'headers' in kwargs:
-        kwargs['headers'].update(context)
+    if "headers" in kwargs:
+        kwargs["headers"].update(context)
     elif len(args) >= 4:
         args[3].update(context)
     else:
-        kwargs['headers'] = context
+        kwargs["headers"] = context
+
     return func(*args, **kwargs)
+
+
+def _print_request_string(args, kwargs):
+    """Print the request so that it can be checked in integration tests
+
+    Only used by integration tests.
+    """
+    # Normalizes the different ways args can be passed to a request
+    # to prevent test flakiness
+    method = None
+    if len(args) > 0:
+        method = args[0]
+    else:
+        method = kwargs.get("method", "").upper()
+
+    url = None
+    if len(args) > 1:
+        url = args[1]
+    else:
+        url = kwargs.get("url")
+
+    # Sort the datapoints POSTed by their name so that snapshots always align
+    data = kwargs.get("data", "{}")
+    data_dict = json.loads(data)
+    data_dict.get("series", []).sort(key=lambda series: series.get("metric"))
+    sorted_data = json.dumps(data_dict)
+
+    # Sort headers to prevent any differences in ordering
+    headers = kwargs.get("headers", {})
+    sorted_headers = sorted(
+        "{}:{}".format(key, value) for key, value in headers.items()
+    )
+    sorted_header_str = json.dumps(sorted_headers)
+    print(
+        "HTTP {} {} Headers: {} Data: {}".format(
+            method, url, sorted_header_str, sorted_data
        )
    )
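
As a quick illustration of the new integration-test hook, the snippet below (not part of the commit) shows the normalized line that _print_request_string emits; the method, URL, headers, and metrics payload are invented, and the patched request wrappers only call this function when DD_INTEGRATION_TEST is set to "true".

from datadog_lambda.patch import _print_request_string

# Hypothetical call arguments, shaped like a metrics POST the layer might make
sample_kwargs = {
    "method": "post",
    "url": "https://api.datadoghq.com/api/v1/distribution_points",
    "headers": {"Content-Type": "application/json"},
    "data": '{"series": [{"metric": "tests.integration.count"}, {"metric": "hello.dog"}]}',
}

# With empty args, method and url are read from kwargs; the series are sorted
# by metric name and the headers are sorted, which keeps snapshots stable
_print_request_string((), sample_kwargs)
# HTTP POST https://api.datadoghq.com/api/v1/distribution_points Headers: ["Content-Type:application/json"] Data: {"series": [{"metric": "hello.dog"}, {"metric": "tests.integration.count"}]}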

scripts/run_integration_tests.sh

Lines changed: 157 additions & 0 deletions
#!/bin/bash

# Usage - run commands from repo root:
# To check if new changes to the layer cause changes to any snapshots:
#   BUILD_LAYERS=true DD_API_KEY=XXXX aws-vault exec sandbox-account-admin -- ./scripts/run_integration_tests.sh
# To regenerate snapshots:
#   UPDATE_SNAPSHOTS=true DD_API_KEY=XXXX aws-vault exec sandbox-account-admin -- ./scripts/run_integration_tests.sh

set -e

# These values need to be in sync with serverless.yml, where there needs to be a function
# defined for every handler_runtime combination
LAMBDA_HANDLERS=("async-metrics" "sync-metrics" "http-requests")
RUNTIMES=("python27" "python36" "python37" "python38")

LOGS_WAIT_SECONDS=20

script_path=${BASH_SOURCE[0]}
scripts_dir=$(dirname $script_path)
repo_dir=$(dirname $scripts_dir)
integration_tests_dir="$repo_dir/tests/integration"

script_start_time=$(date --iso-8601=seconds)

mismatch_found=false

if [ -z "$DD_API_KEY" ]; then
    echo "No DD_API_KEY env var set, exiting"
    exit 1
fi

if [ -n "$UPDATE_SNAPSHOTS" ]; then
    echo "Overwriting snapshots in this execution"
fi

if [ -n "$BUILD_LAYERS" ]; then
    echo "Building layers that will be deployed with our test functions"
    source $scripts_dir/build_layers.sh
else
    echo "Not building layers, ensure they've already been built or re-run with 'BUILD_LAYERS=true DD_API_KEY=XXXX ./scripts/run_integration_tests.sh'"
fi

cd $integration_tests_dir
input_event_files=$(ls ./input_events)
# Sort event files by name so that snapshots stay consistent
input_event_files=($(for file_name in ${input_event_files[@]}; do echo $file_name; done | sort))

echo "Deploying functions"
serverless deploy

echo "Invoking functions"
set +e # Don't exit this script if an invocation fails or there's a diff
for handler_name in "${LAMBDA_HANDLERS[@]}"; do
    for runtime in "${RUNTIMES[@]}"; do
        function_name="${handler_name}_${runtime}"
        # Invoke function once for each input event
        for input_event_file in "${input_event_files[@]}"; do
            # Get event name without trailing ".json" so we can build the snapshot file name
            input_event_name=$(echo "$input_event_file" | sed "s/.json//")
            # Return value snapshot file format is snapshots/return_values/{handler}_{runtime}_{input-event}
            snapshot_path="./snapshots/return_values/${function_name}_${input_event_name}.json"

            return_value=$(serverless invoke -f $function_name --path "./input_events/$input_event_file")

            if [ ! -f $snapshot_path ]; then
                # If the snapshot file doesn't exist yet, we create it
                echo "Writing return value to $snapshot_path because no snapshot exists yet"
                echo "$return_value" >$snapshot_path
            elif [ -n "$UPDATE_SNAPSHOTS" ]; then
                # If $UPDATE_SNAPSHOTS is set to true, write the new return value over the current snapshot
                echo "Overwriting return value snapshot for $snapshot_path"
                echo "$return_value" >$snapshot_path
            else
                # Compare new return value to snapshot
                diff_output=$(echo "$return_value" | diff - $snapshot_path)
                if [ $? -eq 1 ]; then
                    echo "Failed: Return value for $function_name does not match snapshot:"
                    echo "$diff_output"
                    mismatch_found=true
                else
                    echo "Ok: Return value for $function_name with $input_event_name event matches snapshot"
                fi
            fi
        done

    done

done
set -e

echo "Sleeping $LOGS_WAIT_SECONDS seconds to wait for logs to appear in CloudWatch..."
sleep $LOGS_WAIT_SECONDS

echo "Fetching logs for invocations and comparing to snapshots"
for handler_name in "${LAMBDA_HANDLERS[@]}"; do
    for runtime in "${RUNTIMES[@]}"; do
        function_name="${handler_name}_${runtime}"
        function_snapshot_path="./snapshots/logs/$function_name.log"

        # Fetch logs with serverless cli
        raw_logs=$(serverless logs -f $function_name --startTime $script_start_time)

        # Replace invocation-specific data like timestamps and IDs with XXXX to normalize logs across executions
        logs=$(
            echo "$raw_logs" |
                # Filter serverless cli errors
                sed '/Serverless: Recoverable error occurred/d' |
                # Remove blank lines
                sed '/^$/d' |
                # Normalize Lambda runtime report logs
                sed -E 's/(RequestId|TraceId|SegmentId|Duration|Memory Used|"e"): [a-z0-9\.\-]+/\1: XXXX/g' |
                # Normalize DD APM headers and AWS account ID
                sed -E "s/(x-datadog-parent-id:|x-datadog-trace-id:|account_id:)[0-9]+/\1XXXX/g" |
                # Normalize timestamps in datapoints POSTed to DD
                sed -E 's/"points": \[\[[0-9\.]+,/"points": \[\[XXXX,/g' |
                # Strip API key from logged requests
                sed -E "s/(api_key=|'api_key': ')[a-z0-9\.\-]+/\1XXXX/g" |
                # Normalize minor package version so that these snapshots aren't broken on version bumps
                sed -E "s/(dd_lambda_layer:datadog-python[0-9]+_2\.)[0-9]+\.0/\1XX\.0/g"
        )

        if [ ! -f $function_snapshot_path ]; then
            # If no snapshot file exists yet, we create one
            echo "Writing logs to $function_snapshot_path because no snapshot exists yet"
            echo "$logs" >$function_snapshot_path
        elif [ -n "$UPDATE_SNAPSHOTS" ]; then
            # If $UPDATE_SNAPSHOTS is set to true, write the new logs over the current snapshot
            echo "Overwriting log snapshot for $function_snapshot_path"
            echo "$logs" >$function_snapshot_path
        else
            # Compare new logs to snapshots
            set +e # Don't exit this script if there is a diff
            diff_output=$(echo "$logs" | diff - $function_snapshot_path)
            if [ $? -eq 1 ]; then
                echo "Failed: Mismatch found between new $function_name logs (first) and snapshot (second):"
                echo "$diff_output"
                mismatch_found=true
            else
                echo "Ok: New logs for $function_name match snapshot"
            fi
            set -e
        fi
    done
done

if [ "$mismatch_found" = true ]; then
    echo "FAILURE: A mismatch between new data and a snapshot was found and printed above."
    echo "If the change is expected, generate new snapshots by running 'UPDATE_SNAPSHOTS=true DD_API_KEY=XXXX ./scripts/run_integration_tests.sh'"
    exit 1
fi

if [ -n "$UPDATE_SNAPSHOTS" ]; then
    echo "SUCCESS: Wrote new snapshots for all functions"
    exit 0
fi

echo "SUCCESS: No difference found between snapshots and new return values or logs"

tests/integration/handle.py

Lines changed: 34 additions & 0 deletions
import json

from datadog_lambda.metric import lambda_metric
from datadog_lambda.wrapper import datadog_lambda_wrapper


@datadog_lambda_wrapper
def handle(event, context):
    # Parse request ID and record ids out of the event to include in the response
    request_id = event.get("requestContext", {}).get("requestId")
    event_records = event.get("Records", [])

    record_ids = []
    for record in event_records:
        # SQS
        if record.get("messageId"):
            record_ids.append(record["messageId"])
        # SNS
        if record.get("Sns", {}).get("MessageId"):
            record_ids.append(record["Sns"]["MessageId"])

    lambda_metric("hello.dog", 1, tags=["team:serverless", "role:hello"])
    lambda_metric(
        "tests.integration.count", 21, tags=["test:integration", "role:hello"]
    )

    return {
        "statusCode": 200,
        "body": {
            "message": "hello, dog!",
            "request_id": request_id,
            "event_record_ids": record_ids,
        },
    }
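
For orientation (not part of the commit), here is a hypothetical input event combining the fields this handler reads, together with the return value it builds for that event; this is the kind of content the return-value snapshots capture, and all IDs below are invented.

# Hypothetical input event with the fields the handler reads
sample_event = {
    "requestContext": {"requestId": "req-123"},
    "Records": [
        {"messageId": "sqs-msg-1"},           # SQS-style record
        {"Sns": {"MessageId": "sns-msg-1"}},  # SNS-style record
    ],
}

# Return value handle(sample_event, context) produces for this event
expected_response = {
    "statusCode": 200,
    "body": {
        "message": "hello, dog!",
        "request_id": "req-123",
        "event_record_ids": ["sqs-msg-1", "sns-msg-1"],
    },
}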

tests/integration/http_requests.py

Lines changed: 18 additions & 0 deletions
import json
import requests

from datadog_lambda.metric import lambda_metric
from datadog_lambda.wrapper import datadog_lambda_wrapper


@datadog_lambda_wrapper
def handle(event, context):
    lambda_metric("hello.dog", 1, tags=["team:serverless", "role:hello"])
    lambda_metric(
        "tests.integration.count", 21, tags=["test:integration", "role:hello"]
    )

    us_response = requests.get("https://ip-ranges.datadoghq.com/")
    eu_response = requests.get("https://ip-ranges.datadoghq.eu/")

    return {"statusCode": 200, "body": {"message": "hello, dog!"}}
