Skip to content

Commit bda4cf9

Browse files
committed
chore: extending failure
1 parent d637f59 commit bda4cf9

File tree

2 files changed

+39
-28
lines changed

2 files changed

+39
-28
lines changed

docs/utilities/batch.md

Lines changed: 1 addition & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -314,34 +314,7 @@ For these scenarios, you can subclass `BatchProcessor` and quickly override `suc
314314
Let's suppose you'd like to add a metric named `BatchRecordFailures` for each batch record that failed processing
315315

316316
```python title="Extending failure handling mechanism in BatchProcessor"
317-
318-
from typing import Tuple
319-
320-
from aws_lambda_powertools import Metrics
321-
from aws_lambda_powertools.metrics import MetricUnit
322-
from aws_lambda_powertools.utilities.batch import BatchProcessor, ExceptionInfo, EventType, FailureResponse, process_partial_response
323-
from aws_lambda_powertools.utilities.data_classes.sqs_event import SQSRecord
324-
325-
326-
class MyProcessor(BatchProcessor):
327-
def failure_handler(self, record: SQSRecord, exception: ExceptionInfo) -> FailureResponse:
328-
metrics.add_metric(name="BatchRecordFailures", unit=MetricUnit.Count, value=1)
329-
return super().failure_handler(record, exception)
330-
331-
processor = MyProcessor(event_type=EventType.SQS)
332-
metrics = Metrics(namespace="test")
333-
334-
335-
@tracer.capture_method
336-
def record_handler(record: SQSRecord):
337-
payload: str = record.body
338-
if payload:
339-
item: dict = json.loads(payload)
340-
...
341-
342-
@metrics.log_metrics(capture_cold_start_metric=True)
343-
def lambda_handler(event, context: LambdaContext):
344-
return process_partial_response(event=event, record_handler=record_handler, processor=processor, context=context)
317+
--8<-- "examples/batch_processing/src/extending_failure.py"
345318
```
346319

347320
### Create your own partial processor
Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
import json
2+
3+
from aws_lambda_powertools import Logger, Metrics, Tracer
4+
from aws_lambda_powertools.metrics import MetricUnit
5+
from aws_lambda_powertools.utilities.batch import (
6+
BatchProcessor,
7+
EventType,
8+
ExceptionInfo,
9+
FailureResponse,
10+
process_partial_response,
11+
)
12+
from aws_lambda_powertools.utilities.data_classes.sqs_event import SQSRecord
13+
from aws_lambda_powertools.utilities.typing import LambdaContext
14+
15+
16+
class MyProcessor(BatchProcessor):
    """Batch processor that emits a CloudWatch metric for every failed record."""

    def failure_handler(self, record: SQSRecord, exception: ExceptionInfo) -> FailureResponse:
        """Record a ``BatchRecordFailures`` metric, then delegate to the default handler.

        ``metrics`` is the module-level Metrics instance — resolved at call time.
        """
        metrics.add_metric(name="BatchRecordFailures", unit=MetricUnit.Count, value=1)
        response: FailureResponse = super().failure_handler(record, exception)
        return response
20+
21+
22+
# Powertools utilities shared by the handlers below.
metrics = Metrics(namespace="test")
logger = Logger()
tracer = Tracer()

# SQS batch processor using the custom failure handler defined in MyProcessor.
processor = MyProcessor(event_type=EventType.SQS)
26+
27+
28+
@tracer.capture_method
def record_handler(record: SQSRecord):
    """Parse the SQS record body as JSON and log it; records with an empty body are skipped."""
    payload: str = record.body
    if not payload:
        return
    item: dict = json.loads(payload)
    logger.info(item)
34+
35+
36+
@metrics.log_metrics(capture_cold_start_metric=True)
def lambda_handler(event, context: LambdaContext):
    """Lambda entry point: process the SQS batch, reporting partial failures back to SQS."""
    result = process_partial_response(
        event=event,
        record_handler=record_handler,
        processor=processor,
        context=context,
    )
    return result

0 commit comments

Comments (0)