diff --git a/.github/workflows/python_build.yml b/.github/workflows/python_build.yml index 26fbaeb3c4e..6dc4446ee14 100644 --- a/.github/workflows/python_build.yml +++ b/.github/workflows/python_build.yml @@ -37,7 +37,7 @@ jobs: - name: Complexity baseline run: make complexity-baseline - name: Upload coverage to Codecov - uses: codecov/codecov-action@v2.0.2 + uses: codecov/codecov-action@v2.1.0 with: file: ./coverage.xml # flags: unittests diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 00000000000..cf0445d7d27 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,12 @@ +[MESSAGES CONTROL] +disable= + too-many-arguments, + too-many-instance-attributes, + too-few-public-methods, + anomalous-backslash-in-string, + missing-class-docstring, + missing-module-docstring, + missing-function-docstring, + +[FORMAT] +max-line-length=120 diff --git a/CHANGELOG.md b/CHANGELOG.md index f8993c40c73..70b8d32c004 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,70 @@ This project follows [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) fo ## [Unreleased] +## 1.21.0 - 2021-10-05 + +### Bug Fixes + +* **data-classes:** use correct asdict funciton ([#666](https://github.com/awslabs/aws-lambda-powertools-python/issues/666)) +* **feature-flags:** rules should evaluate with an AND op ([#724](https://github.com/awslabs/aws-lambda-powertools-python/issues/724)) +* **idempotency:** sorting keys before hashing ([#722](https://github.com/awslabs/aws-lambda-powertools-python/issues/722)) +* **idempotency:** sorting keys before hashing +* **logger:** push extra keys to the end ([#722](https://github.com/awslabs/aws-lambda-powertools-python/issues/722)) +* **mypy:** a few return types, type signatures, and untyped areas ([#718](https://github.com/awslabs/aws-lambda-powertools-python/issues/718)) + +### Code Refactoring + +* **data-classes:** clean up internal logic for APIGatewayAuthorizerResponse ([#643](https://github.com/awslabs/aws-lambda-powertools-python/issues/643)) + +### Documentation + +* Terraform reference for SAR Lambda Layer ([#716](https://github.com/awslabs/aws-lambda-powertools-python/issues/716)) +* **event-handler:** document catch-all routes ([#705](https://github.com/awslabs/aws-lambda-powertools-python/issues/705)) +* **idempotency:** fix misleading idempotent examples ([#661](https://github.com/awslabs/aws-lambda-powertools-python/issues/661)) +* **jmespath:** clarify envelope terminology +* **parser:** fix incorrect import in root_validator example ([#735](https://github.com/awslabs/aws-lambda-powertools-python/issues/735)) + +### Features + +* expose jmespath powertools functions ([#736](https://github.com/awslabs/aws-lambda-powertools-python/issues/736)) +* boto3 sessions in batch, parameters & idempotency ([#717](https://github.com/awslabs/aws-lambda-powertools-python/issues/717)) +* **feature-flags**: add get_raw_configuration property in store; expose store ([#720](https://github.com/awslabs/aws-lambda-powertools-python/issues/720)) +* **feature-flags:** Bring your own logger for debug ([#709](https://github.com/awslabs/aws-lambda-powertools-python/issues/709)) +* **feature-flags:** improve "IN/NOT_IN"; new rule actions ([#710](https://github.com/awslabs/aws-lambda-powertools-python/issues/710)) +* **feature-flags:** get_raw_configuration property in Store ([#720](https://github.com/awslabs/aws-lambda-powertools-python/issues/720)) +* **feature_flags:** Added inequality conditions ([#721](https://github.com/awslabs/aws-lambda-powertools-python/issues/721)) +* **idempotency:** 
makes customers unit testing easier ([#719](https://github.com/awslabs/aws-lambda-powertools-python/issues/719)) +* **validator:** include missing data elements from a validation error ([#686](https://github.com/awslabs/aws-lambda-powertools-python/issues/686)) + +### Maintenance + +* add python 3.9 support +* **deps:** bump boto3 from 1.18.51 to 1.18.54 ([#733](https://github.com/awslabs/aws-lambda-powertools-python/issues/733)) +* **deps:** bump boto3 from 1.18.32 to 1.18.38 ([#671](https://github.com/awslabs/aws-lambda-powertools-python/issues/671)) +* **deps:** bump boto3 from 1.18.38 to 1.18.41 ([#677](https://github.com/awslabs/aws-lambda-powertools-python/issues/677)) +* **deps:** bump boto3 from 1.18.49 to 1.18.51 ([#713](https://github.com/awslabs/aws-lambda-powertools-python/issues/713)) +* **deps:** bump boto3 from 1.18.41 to 1.18.49 ([#703](https://github.com/awslabs/aws-lambda-powertools-python/issues/703)) +* **deps:** bump codecov/codecov-action from 2.0.2 to 2.1.0 ([#675](https://github.com/awslabs/aws-lambda-powertools-python/issues/675)) +* **deps-dev:** bump coverage from 5.5 to 6.0 ([#732](https://github.com/awslabs/aws-lambda-powertools-python/issues/732)) +* **deps-dev:** bump mkdocs-material from 7.2.8 to 7.3.0 ([#695](https://github.com/awslabs/aws-lambda-powertools-python/issues/695)) +* **deps-dev:** bump mkdocs-material from 7.2.6 to 7.2.8 ([#682](https://github.com/awslabs/aws-lambda-powertools-python/issues/682)) +* **deps-dev:** bump flake8-bugbear from 21.4.3 to 21.9.1 ([#676](https://github.com/awslabs/aws-lambda-powertools-python/issues/676)) +* **deps-dev:** bump flake8-bugbear from 21.9.1 to 21.9.2 ([#712](https://github.com/awslabs/aws-lambda-powertools-python/issues/712)) +* **deps-dev:** bump radon from 4.5.2 to 5.1.0 ([#673](https://github.com/awslabs/aws-lambda-powertools-python/issues/673)) +* **deps-dev:** bump mkdocs-material from 7.3.0 to 7.3.1 ([#731](https://github.com/awslabs/aws-lambda-powertools-python/issues/731)) +* **deps-dev:** bump xenon from 0.7.3 to 0.8.0 ([#669](https://github.com/awslabs/aws-lambda-powertools-python/issues/669)) + +### Bug Fixes + +* **event-handler:** fix issue with strip_prefixes and root level resolvers ([#646](https://github.com/awslabs/aws-lambda-powertools-python/issues/646)) + +### Maintenance + +* **deps:** bump boto3 from 1.18.26 to 1.18.32 ([#663](https://github.com/awslabs/aws-lambda-powertools-python/issues/663)) +* **deps-dev:** bump mkdocs-material from 7.2.4 to 7.2.6 ([#665](https://github.com/awslabs/aws-lambda-powertools-python/issues/665)) +* **deps-dev:** bump pytest from 6.2.4 to 6.2.5 ([#662](https://github.com/awslabs/aws-lambda-powertools-python/issues/662)) +* **deps-dev:** bump mike from 0.6.0 to 1.0.1 ([#453](https://github.com/awslabs/aws-lambda-powertools-python/issues/453)) +* **license:** add third party license to pyproject.toml ([#641](https://github.com/awslabs/aws-lambda-powertools-python/issues/641)) ## 1.20.2 - 2021-09-02 ### Bug Fixes diff --git a/README.md b/README.md index 46a3671f93b..c4778595366 100644 --- a/README.md +++ b/README.md @@ -2,13 +2,15 @@ ![Build](https://github.com/awslabs/aws-lambda-powertools/workflows/Powertools%20Python/badge.svg?branch=master) [![codecov.io](https://codecov.io/github/awslabs/aws-lambda-powertools-python/branch/develop/graphs/badge.svg)](https://app.codecov.io/gh/awslabs/aws-lambda-powertools-python) -![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8&color=blue?style=flat-square&logo=python) ![PyPI 
version](https://badge.fury.io/py/aws-lambda-powertools.svg) ![PyPi monthly downloads](https://img.shields.io/pypi/dm/aws-lambda-powertools)
+![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8|%203.9&color=blue?style=flat-square&logo=python) ![PyPI version](https://badge.fury.io/py/aws-lambda-powertools.svg) ![PyPi monthly downloads](https://img.shields.io/pypi/dm/aws-lambda-powertools)
 A suite of Python utilities for AWS Lambda functions to ease adopting best practices such as tracing, structured logging, custom metrics, and more. ([AWS Lambda Powertools Java](https://github.com/awslabs/aws-lambda-powertools-java) is also available).
+
+
 **[📜Documentation](https://awslabs.github.io/aws-lambda-powertools-python/)** | **[🐍PyPi](https://pypi.org/project/aws-lambda-powertools/)** | **[Roadmap](https://github.com/awslabs/aws-lambda-powertools-roadmap/projects/1)** | **[Quick hello world example](https://github.com/aws-samples/cookiecutter-aws-sam-python)** | **[Detailed blog post](https://aws.amazon.com/blogs/opensource/simplifying-serverless-best-practices-with-lambda-powertools/)**
-> **Join us on the AWS Developers Slack at `#lambda-powertools`** - **[Invite, if you don't have an account](https://join.slack.com/t/awsdevelopers/shared_invite/zt-gu30gquv-EhwIYq3kHhhysaZ2aIX7ew)**
+> **An AWS Developer Acceleration (DevAx) initiative by Specialist Solution Architects | aws-devax-open-source@amazon.com**
 ## Features
@@ -42,6 +44,12 @@ With [pip](https://pip.pypa.io/en/latest/index.html) installed, run: ``pip insta
 * Structured logging initial implementation from [aws-lambda-logging](https://gitlab.com/hadrien/aws_lambda_logging)
 * Powertools idea [DAZN Powertools](https://github.com/getndazn/dazn-lambda-powertools/)
+
+## Connect
+
+* **AWS Developers Slack**: `#lambda-powertools` - **[Invite, if you don't have an account](https://join.slack.com/t/awsdevelopers/shared_invite/zt-gu30gquv-EhwIYq3kHhhysaZ2aIX7ew)**
+* **Email**: aws-lambda-powertools-feedback@amazon.com
+
 ## License
 This library is licensed under the MIT-0 License. See the LICENSE file.
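Several utilities in this release accept an optional `boto3_session`, as the `parameters`, `batch`, and `idempotency` hunks further down show. A minimal sketch of how a caller might pass a custom session to one of them, assuming only the `boto3_session` keyword added in this release; the profile name and parameter path are placeholders:

```python
import boto3

from aws_lambda_powertools.utilities import parameters

# Illustrative profile; any pre-configured boto3.session.Session works here.
session = boto3.session.Session(profile_name="dev")

# SSMProvider gains an optional boto3_session keyword in this release (see the
# parameters/ssm.py hunk below); omitting it falls back to a default session.
ssm_provider = parameters.SSMProvider(boto3_session=session)


def handler(event, context):
    value = ssm_provider.get("/my/app/config")
    return {"config": value}
```

The same keyword appears on `SecretsProvider`, `DynamoDBProvider`, `AppConfigProvider`, `PartialSQSProcessor`, `sqs_batch_processor`, and `DynamoDBPersistenceLayer` in the hunks that follow.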
diff --git a/aws_lambda_powertools/logging/formatter.py b/aws_lambda_powertools/logging/formatter.py index de9254a3371..246fa171d4e 100644 --- a/aws_lambda_powertools/logging/formatter.py +++ b/aws_lambda_powertools/logging/formatter.py @@ -58,7 +58,7 @@ class LambdaPowertoolsFormatter(BasePowertoolsFormatter): def __init__( self, json_serializer: Optional[Callable[[Dict], str]] = None, - json_deserializer: Optional[Callable[[Dict], str]] = None, + json_deserializer: Optional[Callable[[Union[Dict, str, bool, int, float]], str]] = None, json_default: Optional[Callable[[Any], Any]] = None, datefmt: Optional[str] = None, log_record_order: Optional[List[str]] = None, @@ -106,7 +106,7 @@ def __init__( self.update_formatter = self.append_keys # alias to old method if self.utc: - self.converter = time.gmtime + self.converter = time.gmtime # type: ignore super(LambdaPowertoolsFormatter, self).__init__(datefmt=self.datefmt) @@ -128,7 +128,7 @@ def format(self, record: logging.LogRecord) -> str: # noqa: A003 return self.serialize(log=formatted_log) def formatTime(self, record: logging.LogRecord, datefmt: Optional[str] = None) -> str: - record_ts = self.converter(record.created) + record_ts = self.converter(record.created) # type: ignore if datefmt: return time.strftime(datefmt, record_ts) @@ -201,7 +201,7 @@ def _extract_log_exception(self, log_record: logging.LogRecord) -> Union[Tuple[s Log record with constant traceback info and exception name """ if log_record.exc_info: - return self.formatException(log_record.exc_info), log_record.exc_info[0].__name__ + return self.formatException(log_record.exc_info), log_record.exc_info[0].__name__ # type: ignore return None, None @@ -222,7 +222,7 @@ def _extract_log_keys(self, log_record: logging.LogRecord) -> Dict[str, Any]: record_dict["asctime"] = self.formatTime(record=log_record, datefmt=self.datefmt) extras = {k: v for k, v in record_dict.items() if k not in RESERVED_LOG_ATTRS} - formatted_log = {**extras} + formatted_log = {} # Iterate over a default or existing log structure # then replace any std log attribute e.g. 
'%(level)s' to 'INFO', '%(process)d to '4773' @@ -233,6 +233,7 @@ def _extract_log_keys(self, log_record: logging.LogRecord) -> Dict[str, Any]: else: formatted_log[key] = value + formatted_log.update(**extras) return formatted_log @staticmethod diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 35054f86137..0b9b52f8824 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -361,7 +361,7 @@ def registered_handler(self) -> logging.Handler: return handlers[0] @property - def registered_formatter(self) -> Optional[PowertoolsFormatter]: + def registered_formatter(self) -> PowertoolsFormatter: """Convenience property to access logger formatter""" return self.registered_handler.formatter # type: ignore @@ -405,7 +405,9 @@ def get_correlation_id(self) -> Optional[str]: str, optional Value for the correlation id """ - return self.registered_formatter.log_format.get("correlation_id") + if isinstance(self.registered_formatter, LambdaPowertoolsFormatter): + return self.registered_formatter.log_format.get("correlation_id") + return None @staticmethod def _get_log_level(level: Union[str, int, None]) -> Union[str, int]: @@ -444,7 +446,7 @@ def set_package_logger( ------- **Enables debug logging for AWS Lambda Powertools package** - >>> from aws_lambda_powertools.logging.logger import set_package_logger + >>> aws_lambda_powertools.logging.logger import set_package_logger >>> set_package_logger() Parameters diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py index 853f06f210b..25e502d0887 100644 --- a/aws_lambda_powertools/metrics/base.py +++ b/aws_lambda_powertools/metrics/base.py @@ -90,7 +90,7 @@ def __init__( self._metric_unit_options = list(MetricUnit.__members__) self.metadata_set = metadata_set if metadata_set is not None else {} - def add_metric(self, name: str, unit: Union[MetricUnit, str], value: float): + def add_metric(self, name: str, unit: Union[MetricUnit, str], value: float) -> None: """Adds given metric Example @@ -215,7 +215,7 @@ def serialize_metric_set( **metric_names_and_values, # "single_metric": 1.0 } - def add_dimension(self, name: str, value: str): + def add_dimension(self, name: str, value: str) -> None: """Adds given dimension to all metrics Example @@ -241,7 +241,7 @@ def add_dimension(self, name: str, value: str): # checking before casting improves performance in most cases self.dimension_set[name] = value if isinstance(value, str) else str(value) - def add_metadata(self, key: str, value: Any): + def add_metadata(self, key: str, value: Any) -> None: """Adds high cardinal metadata for metrics object This will not be available during metrics visualization. 
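The hunks above add explicit `-> None` return annotations to `add_metric`, `add_dimension`, and `add_metadata`. For context, a short sketch of how these three calls are typically combined; namespace, service, and metric names are illustrative:

```python
from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import MetricUnit

# Namespace, service, and metric names below are placeholders, not taken from the diff.
metrics = Metrics(namespace="ExampleApp", service="payments")


def record_success(order_id: str) -> None:
    # add_metric, add_dimension and add_metadata now carry explicit "-> None" annotations
    metrics.add_metric(name="SuccessfulOrder", unit=MetricUnit.Count, value=1)
    metrics.add_dimension(name="environment", value="prod")
    # Metadata is kept for log search only; it is not emitted as a CloudWatch metric
    metrics.add_metadata(key="order_id", value=order_id)
```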
diff --git a/aws_lambda_powertools/metrics/metric.py b/aws_lambda_powertools/metrics/metric.py index 1ac2bd9450e..a30f428e38e 100644 --- a/aws_lambda_powertools/metrics/metric.py +++ b/aws_lambda_powertools/metrics/metric.py @@ -42,7 +42,7 @@ class SingleMetric(MetricManager): Inherits from `aws_lambda_powertools.metrics.base.MetricManager` """ - def add_metric(self, name: str, unit: Union[MetricUnit, str], value: float): + def add_metric(self, name: str, unit: Union[MetricUnit, str], value: float) -> None: """Method to prevent more than one metric being created Parameters diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index fafc604b505..23e9f542eea 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -2,8 +2,9 @@ import json import logging import warnings -from typing import Any, Callable, Dict, Optional +from typing import Any, Callable, Dict, Optional, Union, cast +from ..shared.types import AnyCallableT from .base import MetricManager, MetricUnit from .metric import single_metric @@ -87,7 +88,7 @@ def __init__(self, service: Optional[str] = None, namespace: Optional[str] = Non service=self.service, ) - def set_default_dimensions(self, **dimensions): + def set_default_dimensions(self, **dimensions) -> None: """Persist dimensions across Lambda invocations Parameters @@ -113,10 +114,10 @@ def lambda_handler(): self.default_dimensions.update(**dimensions) - def clear_default_dimensions(self): + def clear_default_dimensions(self) -> None: self.default_dimensions.clear() - def clear_metrics(self): + def clear_metrics(self) -> None: logger.debug("Clearing out existing metric set from memory") self.metric_set.clear() self.dimension_set.clear() @@ -125,11 +126,11 @@ def clear_metrics(self): def log_metrics( self, - lambda_handler: Optional[Callable[[Any, Any], Any]] = None, + lambda_handler: Union[Callable[[Dict, Any], Any], Optional[Callable[[Dict, Any, Optional[Dict]], Any]]] = None, capture_cold_start_metric: bool = False, raise_on_empty_metrics: bool = False, default_dimensions: Optional[Dict[str, str]] = None, - ): + ) -> AnyCallableT: """Decorator to serialize and publish metrics at the end of a function execution. Be aware that the log_metrics **does call* the decorated function (e.g. lambda_handler). 
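With `log_metrics` now typed to return `AnyCallableT`, a brief usage sketch of the decorator being annotated, complementing the manual calls above; handler body and dimension values are illustrative:

```python
from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import MetricUnit

metrics = Metrics(namespace="ExampleApp", service="orders")


# capture_cold_start_metric and default_dimensions mirror the parameters in the
# signature above; the decorator serializes buffered metrics after the handler returns.
@metrics.log_metrics(capture_cold_start_metric=True, default_dimensions={"environment": "prod"})
def lambda_handler(event: dict, context) -> dict:
    metrics.add_metric(name="OrderReceived", unit=MetricUnit.Count, value=1)
    return {"statusCode": 200}
```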
@@ -169,11 +170,14 @@ def handler(event, context): # Return a partial function with args filled if lambda_handler is None: logger.debug("Decorator called with parameters") - return functools.partial( - self.log_metrics, - capture_cold_start_metric=capture_cold_start_metric, - raise_on_empty_metrics=raise_on_empty_metrics, - default_dimensions=default_dimensions, + return cast( + AnyCallableT, + functools.partial( + self.log_metrics, + capture_cold_start_metric=capture_cold_start_metric, + raise_on_empty_metrics=raise_on_empty_metrics, + default_dimensions=default_dimensions, + ), ) @functools.wraps(lambda_handler) @@ -194,9 +198,9 @@ def decorate(event, context): return response - return decorate + return cast(AnyCallableT, decorate) - def __add_cold_start_metric(self, context: Any): + def __add_cold_start_metric(self, context: Any) -> None: """Add cold start metric and function_name dimension Parameters diff --git a/aws_lambda_powertools/middleware_factory/factory.py b/aws_lambda_powertools/middleware_factory/factory.py index 74858bf6709..8ab16c5e8b7 100644 --- a/aws_lambda_powertools/middleware_factory/factory.py +++ b/aws_lambda_powertools/middleware_factory/factory.py @@ -118,7 +118,7 @@ def final_decorator(func: Optional[Callable] = None, **kwargs): if not inspect.isfunction(func): # @custom_middleware(True) vs @custom_middleware(log_event=True) raise MiddlewareInvalidArgumentError( - f"Only keyword arguments is supported for middlewares: {decorator.__qualname__} received {func}" + f"Only keyword arguments is supported for middlewares: {decorator.__qualname__} received {func}" # type: ignore # noqa: E501 ) @functools.wraps(func) diff --git a/aws_lambda_powertools/shared/constants.py b/aws_lambda_powertools/shared/constants.py index 622ffbce47b..45b46d236f9 100644 --- a/aws_lambda_powertools/shared/constants.py +++ b/aws_lambda_powertools/shared/constants.py @@ -21,3 +21,5 @@ XRAY_SDK_MODULE: str = "aws_xray_sdk" XRAY_SDK_CORE_MODULE: str = "aws_xray_sdk.core" + +IDEMPOTENCY_DISABLED_ENV: str = "POWERTOOLS_IDEMPOTENCY_DISABLED" diff --git a/aws_lambda_powertools/tracing/tracer.py b/aws_lambda_powertools/tracing/tracer.py index dc010a3712f..2626793304c 100644 --- a/aws_lambda_powertools/tracing/tracer.py +++ b/aws_lambda_powertools/tracing/tracer.py @@ -17,7 +17,7 @@ logger = logging.getLogger(__name__) aws_xray_sdk = LazyLoader(constants.XRAY_SDK_MODULE, globals(), constants.XRAY_SDK_MODULE) -aws_xray_sdk.core = LazyLoader(constants.XRAY_SDK_CORE_MODULE, globals(), constants.XRAY_SDK_CORE_MODULE) +aws_xray_sdk.core = LazyLoader(constants.XRAY_SDK_CORE_MODULE, globals(), constants.XRAY_SDK_CORE_MODULE) # type: ignore # noqa: E501 class Tracer: diff --git a/aws_lambda_powertools/utilities/batch/sqs.py b/aws_lambda_powertools/utilities/batch/sqs.py index e37fdbd3fb5..38773a399dd 100644 --- a/aws_lambda_powertools/utilities/batch/sqs.py +++ b/aws_lambda_powertools/utilities/batch/sqs.py @@ -31,6 +31,8 @@ class PartialSQSProcessor(BasePartialProcessor): botocore config object suppress_exception: bool, optional Supress exception raised if any messages fail processing, by default False + boto3_session : boto3.session.Session, optional + Boto3 session to use for AWS API communication Example @@ -56,12 +58,18 @@ class PartialSQSProcessor(BasePartialProcessor): """ - def __init__(self, config: Optional[Config] = None, suppress_exception: bool = False): + def __init__( + self, + config: Optional[Config] = None, + suppress_exception: bool = False, + boto3_session: Optional[boto3.session.Session] = 
None, + ): """ Initializes sqs client. """ config = config or Config() - self.client = boto3.client("sqs", config=config) + session = boto3_session or boto3.session.Session() + self.client = session.client("sqs", config=config) self.suppress_exception = suppress_exception super().__init__() @@ -142,6 +150,7 @@ def sqs_batch_processor( record_handler: Callable, config: Optional[Config] = None, suppress_exception: bool = False, + boto3_session: Optional[boto3.session.Session] = None, ): """ Middleware to handle SQS batch event processing @@ -160,6 +169,8 @@ def sqs_batch_processor( botocore config object suppress_exception: bool, optional Supress exception raised if any messages fail processing, by default False + boto3_session : boto3.session.Session, optional + Boto3 session to use for AWS API communication Examples -------- @@ -180,7 +191,9 @@ def sqs_batch_processor( """ config = config or Config() - processor = PartialSQSProcessor(config=config, suppress_exception=suppress_exception) + session = boto3_session or boto3.session.Session() + + processor = PartialSQSProcessor(config=config, suppress_exception=suppress_exception, boto3_session=session) records = event["Records"] diff --git a/aws_lambda_powertools/utilities/data_classes/api_gateway_authorizer_event.py b/aws_lambda_powertools/utilities/data_classes/api_gateway_authorizer_event.py index 29694eacd97..4682711af92 100644 --- a/aws_lambda_powertools/utilities/data_classes/api_gateway_authorizer_event.py +++ b/aws_lambda_powertools/utilities/data_classes/api_gateway_authorizer_event.py @@ -234,10 +234,12 @@ def raw_query_string(self) -> str: @property def cookies(self) -> List[str]: + """Cookies""" return self["cookies"] @property def headers(self) -> Dict[str, str]: + """Http headers""" return self["headers"] @property @@ -314,6 +316,8 @@ def asdict(self) -> dict: class HttpVerb(enum.Enum): + """Enum of http methods / verbs""" + GET = "GET" POST = "POST" PUT = "PUT" @@ -324,15 +328,32 @@ class HttpVerb(enum.Enum): ALL = "*" +DENY_ALL_RESPONSE = { + "principalId": "deny-all-user", + "policyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Action": "execute-api:Invoke", + "Effect": "Deny", + "Resource": ["*"], + } + ], + }, +} + + class APIGatewayAuthorizerResponse: - """Api Gateway HTTP API V1 payload or Rest api authorizer response helper + """The IAM Policy Response required for API Gateway REST APIs and HTTP APIs. Based on: - https://github.com/awslabs/aws-apigateway-lambda-authorizer-blueprints/blob/\ master/blueprints/python/api-gateway-authorizer-python.py - """ - version = "2012-10-17" - """The policy version used for the evaluation. This should always be '2012-10-17'""" + Documentation: + ------------- + - https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-lambda-authorizer.html + - https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-lambda-authorizer-output.html + """ path_regex = r"^[/.a-zA-Z0-9-\*]+$" """The regular expression used to validate resource paths for the policy""" @@ -345,6 +366,7 @@ def __init__( api_id: str, stage: str, context: Optional[Dict] = None, + usage_identifier_key: Optional[str] = None, ): """ Parameters @@ -373,6 +395,10 @@ def __init__( context : Dict, optional Optional, context. 
Note: only names of type string and values of type int, string or boolean are supported + usage_identifier_key: str, optional + If the API uses a usage plan (the apiKeySource is set to `AUTHORIZER`), the Lambda authorizer function + must return one of the usage plan's API keys as the usageIdentifierKey property value. + > **Note:** This only applies for REST APIs. """ self.principal_id = principal_id self.region = region @@ -380,25 +406,46 @@ def __init__( self.api_id = api_id self.stage = stage self.context = context + self.usage_identifier_key = usage_identifier_key self._allow_routes: List[Dict] = [] self._deny_routes: List[Dict] = [] + self._resource_pattern = re.compile(self.path_regex) - def _add_route(self, effect: str, verb: str, resource: str, conditions: List[Dict]): + @staticmethod + def from_route_arn( + arn: str, + principal_id: str, + context: Optional[Dict] = None, + usage_identifier_key: Optional[str] = None, + ) -> "APIGatewayAuthorizerResponse": + parsed_arn = parse_api_gateway_arn(arn) + return APIGatewayAuthorizerResponse( + principal_id, + parsed_arn.region, + parsed_arn.aws_account_id, + parsed_arn.api_id, + parsed_arn.stage, + context, + usage_identifier_key, + ) + + def _add_route(self, effect: str, http_method: str, resource: str, conditions: Optional[List[Dict]] = None): """Adds a route to the internal lists of allowed or denied routes. Each object in the internal list contains a resource ARN and a condition statement. The condition statement can be null.""" - if verb != "*" and verb not in HttpVerb.__members__: + if http_method != "*" and http_method not in HttpVerb.__members__: allowed_values = [verb.value for verb in HttpVerb] - raise ValueError(f"Invalid HTTP verb: '{verb}'. Use either '{allowed_values}'") + raise ValueError(f"Invalid HTTP verb: '{http_method}'. Use either '{allowed_values}'") - resource_pattern = re.compile(self.path_regex) - if not resource_pattern.match(resource): + if not self._resource_pattern.match(resource): raise ValueError(f"Invalid resource path: {resource}. 
Path should match {self.path_regex}") if resource[:1] == "/": resource = resource[1:] - resource_arn = APIGatewayRouteArn(self.region, self.aws_account_id, self.api_id, self.stage, verb, resource).arn + resource_arn = APIGatewayRouteArn( + self.region, self.aws_account_id, self.api_id, self.stage, http_method, resource + ).arn route = {"resourceArn": resource_arn, "conditions": conditions} @@ -412,24 +459,27 @@ def _get_empty_statement(effect: str) -> Dict[str, Any]: """Returns an empty statement object prepopulated with the correct action and the desired effect.""" return {"Action": "execute-api:Invoke", "Effect": effect.capitalize(), "Resource": []} - def _get_statement_for_effect(self, effect: str, methods: List) -> List: - """This function loops over an array of objects containing a resourceArn and - conditions statement and generates the array of statements for the policy.""" - if len(methods) == 0: + def _get_statement_for_effect(self, effect: str, routes: List[Dict]) -> List[Dict]: + """This function loops over an array of objects containing a `resourceArn` and + `conditions` statement and generates the array of statements for the policy.""" + if not routes: return [] - statements = [] - + statements: List[Dict] = [] statement = self._get_empty_statement(effect) - for method in methods: - if method["conditions"] is None or len(method["conditions"]) == 0: - statement["Resource"].append(method["resourceArn"]) - else: + + for route in routes: + resource_arn = route["resourceArn"] + conditions = route.get("conditions") + if conditions is not None and len(conditions) > 0: conditional_statement = self._get_empty_statement(effect) - conditional_statement["Resource"].append(method["resourceArn"]) - conditional_statement["Condition"] = method["conditions"] + conditional_statement["Resource"].append(resource_arn) + conditional_statement["Condition"] = conditions statements.append(conditional_statement) + else: + statement["Resource"].append(resource_arn) + if len(statement["Resource"]) > 0: statements.append(statement) @@ -442,7 +492,7 @@ def allow_all_routes(self, http_method: str = HttpVerb.ALL.value): ---------- http_method: str """ - self._add_route(effect="Allow", verb=http_method, resource="*", conditions=[]) + self._add_route(effect="Allow", http_method=http_method, resource="*") def deny_all_routes(self, http_method: str = HttpVerb.ALL.value): """Adds a '*' allow to the policy to deny access to all methods of an API @@ -452,7 +502,7 @@ def deny_all_routes(self, http_method: str = HttpVerb.ALL.value): http_method: str """ - self._add_route(effect="Deny", verb=http_method, resource="*", conditions=[]) + self._add_route(effect="Deny", http_method=http_method, resource="*") def allow_route(self, http_method: str, resource: str, conditions: Optional[List[Dict]] = None): """Adds an API Gateway method (Http verb + Resource path) to the list of allowed @@ -460,8 +510,7 @@ def allow_route(self, http_method: str, resource: str, conditions: Optional[List Optionally includes a condition for the policy statement. 
More on AWS policy conditions here: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition""" - conditions = conditions or [] - self._add_route(effect="Allow", verb=http_method, resource=resource, conditions=conditions) + self._add_route(effect="Allow", http_method=http_method, resource=resource, conditions=conditions) def deny_route(self, http_method: str, resource: str, conditions: Optional[List[Dict]] = None): """Adds an API Gateway method (Http verb + Resource path) to the list of denied @@ -469,8 +518,7 @@ def deny_route(self, http_method: str, resource: str, conditions: Optional[List[ Optionally includes a condition for the policy statement. More on AWS policy conditions here: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition""" - conditions = conditions or [] - self._add_route(effect="Deny", verb=http_method, resource=resource, conditions=conditions) + self._add_route(effect="Deny", http_method=http_method, resource=resource, conditions=conditions) def asdict(self) -> Dict[str, Any]: """Generates the policy document based on the internal lists of allowed and denied @@ -482,12 +530,15 @@ def asdict(self) -> Dict[str, Any]: response: Dict[str, Any] = { "principalId": self.principal_id, - "policyDocument": {"Version": self.version, "Statement": []}, + "policyDocument": {"Version": "2012-10-17", "Statement": []}, } response["policyDocument"]["Statement"].extend(self._get_statement_for_effect("Allow", self._allow_routes)) response["policyDocument"]["Statement"].extend(self._get_statement_for_effect("Deny", self._deny_routes)) + if self.usage_identifier_key: + response["usageIdentifierKey"] = self.usage_identifier_key + if self.context: response["context"] = self.context diff --git a/aws_lambda_powertools/utilities/data_classes/sqs_event.py b/aws_lambda_powertools/utilities/data_classes/sqs_event.py index 0e70684cc3f..57caeea4cc2 100644 --- a/aws_lambda_powertools/utilities/data_classes/sqs_event.py +++ b/aws_lambda_powertools/utilities/data_classes/sqs_event.py @@ -75,9 +75,9 @@ def data_type(self) -> str: class SQSMessageAttributes(Dict[str, SQSMessageAttribute]): - def __getitem__(self, key: str) -> Optional[SQSMessageAttribute]: + def __getitem__(self, key: str) -> Optional[SQSMessageAttribute]: # type: ignore item = super(SQSMessageAttributes, self).get(key) - return None if item is None else SQSMessageAttribute(item) + return None if item is None else SQSMessageAttribute(item) # type: ignore class SQSRecord(DictWrapper): diff --git a/aws_lambda_powertools/utilities/feature_flags/appconfig.py b/aws_lambda_powertools/utilities/feature_flags/appconfig.py index 2e0edc3b9b1..dd581df9e22 100644 --- a/aws_lambda_powertools/utilities/feature_flags/appconfig.py +++ b/aws_lambda_powertools/utilities/feature_flags/appconfig.py @@ -1,17 +1,16 @@ import logging import traceback -from typing import Any, Dict, Optional, cast +from typing import Any, Dict, Optional, Union, cast from botocore.config import Config +from aws_lambda_powertools.utilities import jmespath_utils from aws_lambda_powertools.utilities.parameters import AppConfigProvider, GetParameterError, TransformParameterError -from ...shared import jmespath_utils +from ... 
import Logger from .base import StoreProvider from .exceptions import ConfigurationStoreError, StoreClientError -logger = logging.getLogger(__name__) - TRANSFORM_TYPE = "json" @@ -25,6 +24,7 @@ def __init__( sdk_config: Optional[Config] = None, envelope: Optional[str] = "", jmespath_options: Optional[Dict] = None, + logger: Optional[Union[logging.Logger, Logger]] = None, ): """This class fetches JSON schemas from AWS AppConfig @@ -44,8 +44,11 @@ def __init__( JMESPath expression to pluck feature flags data from config jmespath_options : Optional[Dict] Alternative JMESPath options to be included when filtering expr + logger: A logging object + Used to log messages. If None is supplied, one will be created. """ super().__init__() + self.logger = logger or logging.getLogger(__name__) self.environment = environment self.application = application self.name = name @@ -55,9 +58,34 @@ def __init__( self.jmespath_options = jmespath_options self._conf_store = AppConfigProvider(environment=environment, application=application, config=sdk_config) + @property + def get_raw_configuration(self) -> Dict[str, Any]: + """Fetch feature schema configuration from AWS AppConfig""" + try: + # parse result conf as JSON, keep in cache for self.max_age seconds + self.logger.debug( + "Fetching configuration from the store", extra={"param_name": self.name, "max_age": self.cache_seconds} + ) + return cast( + dict, + self._conf_store.get( + name=self.name, + transform=TRANSFORM_TYPE, + max_age=self.cache_seconds, + ), + ) + except (GetParameterError, TransformParameterError) as exc: + err_msg = traceback.format_exc() + if "AccessDenied" in err_msg: + raise StoreClientError(err_msg) from exc + raise ConfigurationStoreError("Unable to get AWS AppConfig configuration file") from exc + def get_configuration(self) -> Dict[str, Any]: """Fetch feature schema configuration from AWS AppConfig + If envelope is set, it'll extract and return feature flags from configuration, + otherwise it'll return the entire configuration fetched from AWS AppConfig. 
+ Raises ------ ConfigurationStoreError @@ -68,25 +96,12 @@ def get_configuration(self) -> Dict[str, Any]: Dict[str, Any] parsed JSON dictionary """ - try: - # parse result conf as JSON, keep in cache for self.max_age seconds - config = cast( - dict, - self._conf_store.get( - name=self.name, - transform=TRANSFORM_TYPE, - max_age=self.cache_seconds, - ), - ) + config = self.get_raw_configuration - if self.envelope: - config = jmespath_utils.extract_data_from_envelope( - data=config, envelope=self.envelope, jmespath_options=self.jmespath_options - ) + if self.envelope: + self.logger.debug("Envelope enabled; extracting data from config", extra={"envelope": self.envelope}) + config = jmespath_utils.extract_data_from_envelope( + data=config, envelope=self.envelope, jmespath_options=self.jmespath_options + ) - return config - except (GetParameterError, TransformParameterError) as exc: - err_msg = traceback.format_exc() - if "AccessDenied" in err_msg: - raise StoreClientError(err_msg) from exc - raise ConfigurationStoreError("Unable to get AWS AppConfig configuration file") from exc + return config diff --git a/aws_lambda_powertools/utilities/feature_flags/base.py b/aws_lambda_powertools/utilities/feature_flags/base.py index edb94c4f45d..e323f32d8b1 100644 --- a/aws_lambda_powertools/utilities/feature_flags/base.py +++ b/aws_lambda_powertools/utilities/feature_flags/base.py @@ -3,10 +3,19 @@ class StoreProvider(ABC): + @property + @abstractmethod + def get_raw_configuration(self) -> Dict[str, Any]: + """Get configuration from any store and return the parsed JSON dictionary""" + raise NotImplementedError() # pragma: no cover + @abstractmethod def get_configuration(self) -> Dict[str, Any]: """Get configuration from any store and return the parsed JSON dictionary + If envelope is set, it'll extract and return feature flags from configuration, + otherwise it'll return the entire configuration fetched from the store. + Raises ------ ConfigurationStoreError @@ -42,10 +51,10 @@ def get_configuration(self) -> Dict[str, Any]: } ``` """ - return NotImplemented # pragma: no cover + raise NotImplementedError() # pragma: no cover class BaseValidator(ABC): @abstractmethod def validate(self): - return NotImplemented # pragma: no cover + raise NotImplementedError() # pragma: no cover diff --git a/aws_lambda_powertools/utilities/feature_flags/feature_flags.py b/aws_lambda_powertools/utilities/feature_flags/feature_flags.py index d04e74ff293..c66feee0536 100644 --- a/aws_lambda_powertools/utilities/feature_flags/feature_flags.py +++ b/aws_lambda_powertools/utilities/feature_flags/feature_flags.py @@ -1,15 +1,14 @@ import logging from typing import Any, Dict, List, Optional, Union, cast +from ... import Logger from . import schema from .base import StoreProvider from .exceptions import ConfigurationStoreError -logger = logging.getLogger(__name__) - class FeatureFlags: - def __init__(self, store: StoreProvider): + def __init__(self, store: StoreProvider, logger: Optional[Union[logging.Logger, Logger]] = None): """Evaluates whether feature flags should be enabled based on a given context. It uses the provided store to fetch feature flag rules before evaluating them. @@ -35,26 +34,37 @@ def __init__(self, store: StoreProvider): ---------- store: StoreProvider Store to use to fetch feature flag schema configuration. + logger: A logging object + Used to log messages. If None is supplied, one will be created. 
""" - self._store = store + self.store = store + self.logger = logger or logging.getLogger(__name__) - @staticmethod - def _match_by_action(action: str, condition_value: Any, context_value: Any) -> bool: + def _match_by_action(self, action: str, condition_value: Any, context_value: Any) -> bool: if not context_value: return False mapping_by_action = { schema.RuleAction.EQUALS.value: lambda a, b: a == b, + schema.RuleAction.NOT_EQUALS.value: lambda a, b: a != b, + schema.RuleAction.KEY_GREATER_THAN_VALUE.value: lambda a, b: a > b, + schema.RuleAction.KEY_GREATER_THAN_OR_EQUAL_VALUE.value: lambda a, b: a >= b, + schema.RuleAction.KEY_LESS_THAN_VALUE.value: lambda a, b: a < b, + schema.RuleAction.KEY_LESS_THAN_OR_EQUAL_VALUE.value: lambda a, b: a <= b, schema.RuleAction.STARTSWITH.value: lambda a, b: a.startswith(b), schema.RuleAction.ENDSWITH.value: lambda a, b: a.endswith(b), schema.RuleAction.IN.value: lambda a, b: a in b, schema.RuleAction.NOT_IN.value: lambda a, b: a not in b, + schema.RuleAction.KEY_IN_VALUE.value: lambda a, b: a in b, + schema.RuleAction.KEY_NOT_IN_VALUE.value: lambda a, b: a not in b, + schema.RuleAction.VALUE_IN_KEY.value: lambda a, b: b in a, + schema.RuleAction.VALUE_NOT_IN_KEY.value: lambda a, b: b not in a, } try: func = mapping_by_action.get(action, lambda a, b: False) return func(context_value, condition_value) except Exception as exc: - logger.debug(f"caught exception while matching action: action={action}, exception={str(exc)}") + self.logger.debug(f"caught exception while matching action: action={action}, exception={str(exc)}") return False def _evaluate_conditions( @@ -65,7 +75,7 @@ def _evaluate_conditions( conditions = cast(List[Dict], rule.get(schema.CONDITIONS_KEY)) if not conditions: - logger.debug( + self.logger.debug( f"rule did not match, no conditions to match, rule_name={rule_name}, rule_value={rule_match_value}, " f"name={feature_name} " ) @@ -77,13 +87,13 @@ def _evaluate_conditions( cond_value = condition.get(schema.CONDITION_VALUE) if not self._match_by_action(action=cond_action, condition_value=cond_value, context_value=context_value): - logger.debug( + self.logger.debug( f"rule did not match action, rule_name={rule_name}, rule_value={rule_match_value}, " f"name={feature_name}, context_value={str(context_value)} " ) return False # context doesn't match condition - logger.debug(f"rule matched, rule_name={rule_name}, rule_value={rule_match_value}, name={feature_name}") + self.logger.debug(f"rule matched, rule_name={rule_name}, rule_value={rule_match_value}, name={feature_name}") return True def _evaluate_rules( @@ -94,16 +104,17 @@ def _evaluate_rules( rule_match_value = rule.get(schema.RULE_MATCH_VALUE) # Context might contain PII data; do not log its value - logger.debug(f"Evaluating rule matching, rule={rule_name}, feature={feature_name}, default={feat_default}") + self.logger.debug( + f"Evaluating rule matching, rule={rule_name}, feature={feature_name}, default={feat_default}" + ) if self._evaluate_conditions(rule_name=rule_name, feature_name=feature_name, rule=rule, context=context): return bool(rule_match_value) - # no rule matched, return default value of feature - logger.debug(f"no rule matched, returning feature default, default={feat_default}, name={feature_name}") - return feat_default - return False + # no rule matched, return default value of feature + self.logger.debug(f"no rule matched, returning feature default, default={feat_default}, name={feature_name}") + return feat_default - def get_configuration(self) -> Union[Dict[str, 
Dict], Dict]: + def get_configuration(self) -> Dict: """Get validated feature flag schema from configured store. Largely used to aid testing, since it's called by `evaluate` and `get_enabled_features` methods. @@ -146,8 +157,8 @@ def get_configuration(self) -> Union[Dict[str, Dict], Dict]: ``` """ # parse result conf as JSON, keep in cache for max age defined in store - logger.debug(f"Fetching schema from registered store, store={self._store}") - config = self._store.get_configuration() + self.logger.debug(f"Fetching schema from registered store, store={self.store}") + config: Dict = self.store.get_configuration() validator = schema.SchemaValidator(schema=config) validator.validate() @@ -190,21 +201,21 @@ def evaluate(self, *, name: str, context: Optional[Dict[str, Any]] = None, defau try: features = self.get_configuration() except ConfigurationStoreError as err: - logger.debug(f"Failed to fetch feature flags from store, returning default provided, reason={err}") + self.logger.debug(f"Failed to fetch feature flags from store, returning default provided, reason={err}") return default feature = features.get(name) if feature is None: - logger.debug(f"Feature not found; returning default provided, name={name}, default={default}") + self.logger.debug(f"Feature not found; returning default provided, name={name}, default={default}") return default rules = feature.get(schema.RULES_KEY) feat_default = feature.get(schema.FEATURE_DEFAULT_VAL_KEY) if not rules: - logger.debug(f"no rules found, returning feature default, name={name}, default={feat_default}") + self.logger.debug(f"no rules found, returning feature default, name={name}, default={feat_default}") return bool(feat_default) - logger.debug(f"looking for rule match, name={name}, default={feat_default}") + self.logger.debug(f"looking for rule match, name={name}, default={feat_default}") return self._evaluate_rules(feature_name=name, context=context, feat_default=bool(feat_default), rules=rules) def get_enabled_features(self, *, context: Optional[Dict[str, Any]] = None) -> List[str]: @@ -241,20 +252,20 @@ def get_enabled_features(self, *, context: Optional[Dict[str, Any]] = None) -> L try: features: Dict[str, Any] = self.get_configuration() except ConfigurationStoreError as err: - logger.debug(f"Failed to fetch feature flags from store, returning empty list, reason={err}") + self.logger.debug(f"Failed to fetch feature flags from store, returning empty list, reason={err}") return features_enabled - logger.debug("Evaluating all features") + self.logger.debug("Evaluating all features") for name, feature in features.items(): rules = feature.get(schema.RULES_KEY, {}) feature_default_value = feature.get(schema.FEATURE_DEFAULT_VAL_KEY) if feature_default_value and not rules: - logger.debug(f"feature is enabled by default and has no defined rules, name={name}") + self.logger.debug(f"feature is enabled by default and has no defined rules, name={name}") features_enabled.append(name) elif self._evaluate_rules( feature_name=name, context=context, feat_default=feature_default_value, rules=rules ): - logger.debug(f"feature's calculated value is True, name={name}") + self.logger.debug(f"feature's calculated value is True, name={name}") features_enabled.append(name) return features_enabled diff --git a/aws_lambda_powertools/utilities/feature_flags/schema.py b/aws_lambda_powertools/utilities/feature_flags/schema.py index efce82018db..6a92508676e 100644 --- a/aws_lambda_powertools/utilities/feature_flags/schema.py +++ 
b/aws_lambda_powertools/utilities/feature_flags/schema.py @@ -1,12 +1,11 @@ import logging from enum import Enum -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union +from ... import Logger from .base import BaseValidator from .exceptions import SchemaValidationError -logger = logging.getLogger(__name__) - RULES_KEY = "rules" FEATURE_DEFAULT_VAL_KEY = "default" CONDITIONS_KEY = "conditions" @@ -18,10 +17,19 @@ class RuleAction(str, Enum): EQUALS = "EQUALS" + NOT_EQUALS = "NOT_EQUALS" + KEY_GREATER_THAN_VALUE = "KEY_GREATER_THAN_VALUE" + KEY_GREATER_THAN_OR_EQUAL_VALUE = "KEY_GREATER_THAN_OR_EQUAL_VALUE" + KEY_LESS_THAN_VALUE = "KEY_LESS_THAN_VALUE" + KEY_LESS_THAN_OR_EQUAL_VALUE = "KEY_LESS_THAN_OR_EQUAL_VALUE" STARTSWITH = "STARTSWITH" ENDSWITH = "ENDSWITH" IN = "IN" NOT_IN = "NOT_IN" + KEY_IN_VALUE = "KEY_IN_VALUE" + KEY_NOT_IN_VALUE = "KEY_NOT_IN_VALUE" + VALUE_IN_KEY = "VALUE_IN_KEY" + VALUE_NOT_IN_KEY = "VALUE_NOT_IN_KEY" class SchemaValidator(BaseValidator): @@ -80,7 +88,9 @@ class SchemaValidator(BaseValidator): The value MUST contain the following members: * **action**: `str`. Operation to perform to match a key and value. - The value MUST be either EQUALS, STARTSWITH, ENDSWITH, IN, NOT_IN + The value MUST be either EQUALS, STARTSWITH, ENDSWITH, + KEY_IN_VALUE KEY_NOT_IN_VALUE VALUE_IN_KEY VALUE_NOT_IN_KEY + * **key**: `str`. Key in given context to perform operation * **value**: `Any`. Value in given context that should match action operation. @@ -105,11 +115,12 @@ class SchemaValidator(BaseValidator): ``` """ - def __init__(self, schema: Dict[str, Any]): + def __init__(self, schema: Dict[str, Any], logger: Optional[Union[logging.Logger, Logger]] = None): self.schema = schema + self.logger = logger or logging.getLogger(__name__) def validate(self) -> None: - logger.debug("Validating schema") + self.logger.debug("Validating schema") if not isinstance(self.schema, dict): raise SchemaValidationError(f"Features must be a dictionary, schema={str(self.schema)}") @@ -120,12 +131,13 @@ def validate(self) -> None: class FeaturesValidator(BaseValidator): """Validates each feature and calls RulesValidator to validate its rules""" - def __init__(self, schema: Dict): + def __init__(self, schema: Dict, logger: Optional[Union[logging.Logger, Logger]] = None): self.schema = schema + self.logger = logger or logging.getLogger(__name__) def validate(self): for name, feature in self.schema.items(): - logger.debug(f"Attempting to validate feature '{name}'") + self.logger.debug(f"Attempting to validate feature '{name}'") self.validate_feature(name, feature) rules = RulesValidator(feature=feature) rules.validate() @@ -143,21 +155,22 @@ def validate_feature(name, feature): class RulesValidator(BaseValidator): """Validates each rule and calls ConditionsValidator to validate each rule's conditions""" - def __init__(self, feature: Dict[str, Any]): + def __init__(self, feature: Dict[str, Any], logger: Optional[Union[logging.Logger, Logger]] = None): self.feature = feature self.feature_name = next(iter(self.feature)) self.rules: Optional[Dict] = self.feature.get(RULES_KEY) + self.logger = logger or logging.getLogger(__name__) def validate(self): if not self.rules: - logger.debug("Rules are empty, ignoring validation") + self.logger.debug("Rules are empty, ignoring validation") return if not isinstance(self.rules, dict): raise SchemaValidationError(f"Feature rules must be a dictionary, feature={self.feature_name}") for rule_name, rule in self.rules.items(): - 
logger.debug(f"Attempting to validate rule '{rule_name}'") + self.logger.debug(f"Attempting to validate rule '{rule_name}'") self.validate_rule(rule=rule, rule_name=rule_name, feature_name=self.feature_name) conditions = ConditionsValidator(rule=rule, rule_name=rule_name) conditions.validate() @@ -183,15 +196,18 @@ def validate_rule_default_value(rule: Dict, rule_name: str): class ConditionsValidator(BaseValidator): - def __init__(self, rule: Dict[str, Any], rule_name: str): + def __init__(self, rule: Dict[str, Any], rule_name: str, logger: Optional[Union[logging.Logger, Logger]] = None): self.conditions: List[Dict[str, Any]] = rule.get(CONDITIONS_KEY, {}) self.rule_name = rule_name + self.logger = logger or logging.getLogger(__name__) def validate(self): if not self.conditions or not isinstance(self.conditions, list): raise SchemaValidationError(f"Invalid condition, rule={self.rule_name}") for condition in self.conditions: + # Condition can contain PII data; do not log condition value + self.logger.debug(f"Attempting to validate condition for '{self.rule_name}'") self.validate_condition(rule_name=self.rule_name, condition=condition) @staticmethod @@ -199,8 +215,6 @@ def validate_condition(rule_name: str, condition: Dict[str, str]) -> None: if not condition or not isinstance(condition, dict): raise SchemaValidationError(f"Feature rule condition must be a dictionary, rule={rule_name}") - # Condition can contain PII data; do not log condition value - logger.debug(f"Attempting to validate condition for '{rule_name}'") ConditionsValidator.validate_condition_action(condition=condition, rule_name=rule_name) ConditionsValidator.validate_condition_key(condition=condition, rule_name=rule_name) ConditionsValidator.validate_condition_value(condition=condition, rule_name=rule_name) diff --git a/aws_lambda_powertools/utilities/idempotency/idempotency.py b/aws_lambda_powertools/utilities/idempotency/idempotency.py index 06c9a578aa2..6984cfbbd8e 100644 --- a/aws_lambda_powertools/utilities/idempotency/idempotency.py +++ b/aws_lambda_powertools/utilities/idempotency/idempotency.py @@ -3,9 +3,11 @@ """ import functools import logging +import os from typing import Any, Callable, Dict, Optional, cast from aws_lambda_powertools.middleware_factory import lambda_handler_decorator +from aws_lambda_powertools.shared.constants import IDEMPOTENCY_DISABLED_ENV from aws_lambda_powertools.shared.types import AnyCallableT from aws_lambda_powertools.utilities.idempotency.base import IdempotencyHandler from aws_lambda_powertools.utilities.idempotency.config import IdempotencyConfig @@ -56,6 +58,9 @@ def idempotent( >>> return {"StatusCode": 200} """ + if os.getenv(IDEMPOTENCY_DISABLED_ENV): + return handler(event, context) + config = config or IdempotencyConfig() args = event, context idempotency_handler = IdempotencyHandler( @@ -122,6 +127,9 @@ def process_order(customer_id: str, order: dict, **kwargs): @functools.wraps(function) def decorate(*args, **kwargs): + if os.getenv(IDEMPOTENCY_DISABLED_ENV): + return function(*args, **kwargs) + payload = kwargs.get(data_keyword_argument) if payload is None: diff --git a/aws_lambda_powertools/utilities/idempotency/persistence/base.py b/aws_lambda_powertools/utilities/idempotency/persistence/base.py index 4901e9f9f75..907af8edaa7 100644 --- a/aws_lambda_powertools/utilities/idempotency/persistence/base.py +++ b/aws_lambda_powertools/utilities/idempotency/persistence/base.py @@ -16,7 +16,6 @@ from aws_lambda_powertools.shared import constants from 
aws_lambda_powertools.shared.cache_dict import LRUDict -from aws_lambda_powertools.shared.jmespath_utils import PowertoolsFunctions from aws_lambda_powertools.shared.json_encoder import Encoder from aws_lambda_powertools.utilities.idempotency.config import IdempotencyConfig from aws_lambda_powertools.utilities.idempotency.exceptions import ( @@ -25,6 +24,7 @@ IdempotencyKeyError, IdempotencyValidationError, ) +from aws_lambda_powertools.utilities.jmespath_utils import PowertoolsFunctions logger = logging.getLogger(__name__) diff --git a/aws_lambda_powertools/utilities/idempotency/persistence/dynamodb.py b/aws_lambda_powertools/utilities/idempotency/persistence/dynamodb.py index ae3a1be490f..0ce307ab503 100644 --- a/aws_lambda_powertools/utilities/idempotency/persistence/dynamodb.py +++ b/aws_lambda_powertools/utilities/idempotency/persistence/dynamodb.py @@ -62,11 +62,11 @@ def __init__( >>> return {"StatusCode": 200} """ - boto_config = boto_config or Config() - session = boto3_session or boto3.session.Session() - self._ddb_resource = session.resource("dynamodb", config=boto_config) + self._boto_config = boto_config or Config() + self._boto3_session = boto3_session or boto3.session.Session() + + self._table = None self.table_name = table_name - self.table = self._ddb_resource.Table(self.table_name) self.key_attr = key_attr self.expiry_attr = expiry_attr self.status_attr = status_attr @@ -74,6 +74,25 @@ def __init__( self.validation_key_attr = validation_key_attr super(DynamoDBPersistenceLayer, self).__init__() + @property + def table(self): + """ + Caching property to store boto3 dynamodb Table resource + + """ + if self._table: + return self._table + ddb_resource = self._boto3_session.resource("dynamodb", config=self._boto_config) + self._table = ddb_resource.Table(self.table_name) + return self._table + + @table.setter + def table(self, table): + """ + Allow table instance variable to be set directly, primarily for use in tests + """ + self._table = table + def _item_to_data_record(self, item: Dict[str, Any]) -> DataRecord: """ Translate raw item records from DynamoDB to DataRecord @@ -121,10 +140,11 @@ def _put_record(self, data_record: DataRecord) -> None: logger.debug(f"Putting record for idempotency key: {data_record.idempotency_key}") self.table.put_item( Item=item, - ConditionExpression=f"attribute_not_exists({self.key_attr}) OR {self.expiry_attr} < :now", + ConditionExpression="attribute_not_exists(#id) OR #now < :now", + ExpressionAttributeNames={"#id": self.key_attr, "#now": self.expiry_attr}, ExpressionAttributeValues={":now": int(now.timestamp())}, ) - except self._ddb_resource.meta.client.exceptions.ConditionalCheckFailedException: + except self.table.meta.client.exceptions.ConditionalCheckFailedException: logger.debug(f"Failed to put record for already existing idempotency key: {data_record.idempotency_key}") raise IdempotencyItemAlreadyExistsError @@ -154,7 +174,7 @@ def _update_record(self, data_record: DataRecord): "ExpressionAttributeNames": expression_attr_names, } - self.table.update_item(**kwargs) # type: ignore + self.table.update_item(**kwargs) def _delete_record(self, data_record: DataRecord) -> None: logger.debug(f"Deleting record for idempotency key: {data_record.idempotency_key}") diff --git a/aws_lambda_powertools/shared/jmespath_utils.py b/aws_lambda_powertools/utilities/jmespath_utils/__init__.py similarity index 59% rename from aws_lambda_powertools/shared/jmespath_utils.py rename to aws_lambda_powertools/utilities/jmespath_utils/__init__.py index 
9cc736aedfb..a8d210bc1e0 100644 --- a/aws_lambda_powertools/shared/jmespath_utils.py +++ b/aws_lambda_powertools/utilities/jmespath_utils/__init__.py @@ -6,22 +6,23 @@ import jmespath from jmespath.exceptions import LexerError +from jmespath.functions import Functions, signature from aws_lambda_powertools.exceptions import InvalidEnvelopeExpressionError logger = logging.getLogger(__name__) -class PowertoolsFunctions(jmespath.functions.Functions): - @jmespath.functions.signature({"types": ["string"]}) +class PowertoolsFunctions(Functions): + @signature({"types": ["string"]}) def _func_powertools_json(self, value): return json.loads(value) - @jmespath.functions.signature({"types": ["string"]}) + @signature({"types": ["string"]}) def _func_powertools_base64(self, value): return base64.b64decode(value).decode() - @jmespath.functions.signature({"types": ["string"]}) + @signature({"types": ["string"]}) def _func_powertools_base64_gzip(self, value): encoded = base64.b64decode(value) uncompressed = gzip.decompress(encoded) @@ -29,8 +30,27 @@ def _func_powertools_base64_gzip(self, value): return uncompressed.decode() -def extract_data_from_envelope(data: Union[Dict, str], envelope: str, jmespath_options: Optional[Dict]) -> Any: - """Searches data using JMESPath expression +def extract_data_from_envelope(data: Union[Dict, str], envelope: str, jmespath_options: Optional[Dict] = None) -> Any: + """Searches and extracts data using JMESPath + + Envelope being the JMESPath expression to extract the data you're after + + Built-in JMESPath functions include: powertools_json, powertools_base64, powertools_base64_gzip + + Examples + -------- + + **Deserialize JSON string and extracts data from body key** + + from aws_lambda_powertools.utilities.jmespath_utils import extract_data_from_envelope + from aws_lambda_powertools.utilities.typing import LambdaContext + + + def handler(event: dict, context: LambdaContext): + # event = {"body": "{\"customerId\":\"dd4649e6-2484-4993-acb8-0f9123103394\"}"} # noqa: E800 + payload = extract_data_from_envelope(data=event, envelope="powertools_json(body)") + customer = payload.get("customerId") # now deserialized + ... 
Parameters ---------- @@ -41,6 +61,7 @@ def extract_data_from_envelope(data: Union[Dict, str], envelope: str, jmespath_o jmespath_options : Dict Alternative JMESPath options to be included when filtering expr + Returns ------- Any diff --git a/aws_lambda_powertools/utilities/jmespath_utils/envelopes.py b/aws_lambda_powertools/utilities/jmespath_utils/envelopes.py new file mode 100644 index 00000000000..df50e5f98d4 --- /dev/null +++ b/aws_lambda_powertools/utilities/jmespath_utils/envelopes.py @@ -0,0 +1,8 @@ +API_GATEWAY_REST = "powertools_json(body)" +API_GATEWAY_HTTP = API_GATEWAY_REST +SQS = "Records[*].powertools_json(body)" +SNS = "Records[0].Sns.Message | powertools_json(@)" +EVENTBRIDGE = "detail" +CLOUDWATCH_EVENTS_SCHEDULED = EVENTBRIDGE +KINESIS_DATA_STREAM = "Records[*].kinesis.powertools_json(powertools_base64(data))" +CLOUDWATCH_LOGS = "awslogs.powertools_base64_gzip(data) | powertools_json(@).logEvents[*]" diff --git a/aws_lambda_powertools/utilities/parameters/appconfig.py b/aws_lambda_powertools/utilities/parameters/appconfig.py index 4a400aa7789..d1613c14513 100644 --- a/aws_lambda_powertools/utilities/parameters/appconfig.py +++ b/aws_lambda_powertools/utilities/parameters/appconfig.py @@ -29,6 +29,8 @@ class AppConfigProvider(BaseProvider): Application of the configuration to pass during client initialization config: botocore.config.Config, optional Botocore configuration to pass during client initialization + boto3_session : boto3.session.Session, optional + Boto3 session to use for AWS API communication Example ------- @@ -60,13 +62,20 @@ class AppConfigProvider(BaseProvider): client: Any = None - def __init__(self, environment: str, application: Optional[str] = None, config: Optional[Config] = None): + def __init__( + self, + environment: str, + application: Optional[str] = None, + config: Optional[Config] = None, + boto3_session: Optional[boto3.session.Session] = None, + ): """ Initialize the App Config client """ config = config or Config() - self.client = boto3.client("appconfig", config=config) + session = boto3_session or boto3.session.Session() + self.client = session.client("appconfig", config=config) self.application = resolve_env_var_choice( choice=application, env=os.getenv(constants.SERVICE_NAME_ENV, "service_undefined") ) diff --git a/aws_lambda_powertools/utilities/parameters/dynamodb.py b/aws_lambda_powertools/utilities/parameters/dynamodb.py index 39bd1a8d6b7..9220edf3b05 100644 --- a/aws_lambda_powertools/utilities/parameters/dynamodb.py +++ b/aws_lambda_powertools/utilities/parameters/dynamodb.py @@ -30,6 +30,8 @@ class DynamoDBProvider(BaseProvider): Complete url to reference local DynamoDB instance, e.g. 
http://localhost:8080 config: botocore.config.Config, optional Botocore configuration to pass during client initialization + boto3_session : boto3.session.Session, optional + Boto3 session to use for AWS API communication Example ------- @@ -149,13 +151,16 @@ def __init__( value_attr: str = "value", endpoint_url: Optional[str] = None, config: Optional[Config] = None, + boto3_session: Optional[boto3.session.Session] = None, ): """ Initialize the DynamoDB client """ config = config or Config() - self.table = boto3.resource("dynamodb", endpoint_url=endpoint_url, config=config).Table(table_name) + session = boto3_session or boto3.session.Session() + + self.table = session.resource("dynamodb", endpoint_url=endpoint_url, config=config).Table(table_name) self.key_attr = key_attr self.sort_attr = sort_attr diff --git a/aws_lambda_powertools/utilities/parameters/secrets.py b/aws_lambda_powertools/utilities/parameters/secrets.py index 5699876d90e..b64e70ae184 100644 --- a/aws_lambda_powertools/utilities/parameters/secrets.py +++ b/aws_lambda_powertools/utilities/parameters/secrets.py @@ -19,6 +19,8 @@ class SecretsProvider(BaseProvider): ---------- config: botocore.config.Config, optional Botocore configuration to pass during client initialization + boto3_session : boto3.session.Session, optional + Boto3 session to use for AWS API communication Example ------- @@ -58,14 +60,14 @@ class SecretsProvider(BaseProvider): client: Any = None - def __init__(self, config: Optional[Config] = None): + def __init__(self, config: Optional[Config] = None, boto3_session: Optional[boto3.session.Session] = None): """ Initialize the Secrets Manager client """ config = config or Config() - - self.client = boto3.client("secretsmanager", config=config) + session = boto3_session or boto3.session.Session() + self.client = session.client("secretsmanager", config=config) super().__init__() diff --git a/aws_lambda_powertools/utilities/parameters/ssm.py b/aws_lambda_powertools/utilities/parameters/ssm.py index 2a16ad91f08..4cbb16354c7 100644 --- a/aws_lambda_powertools/utilities/parameters/ssm.py +++ b/aws_lambda_powertools/utilities/parameters/ssm.py @@ -19,6 +19,8 @@ class SSMProvider(BaseProvider): ---------- config: botocore.config.Config, optional Botocore configuration to pass during client initialization + boto3_session : boto3.session.Session, optional + Boto3 session to use for AWS API communication Example ------- @@ -74,13 +76,14 @@ class SSMProvider(BaseProvider): client: Any = None - def __init__(self, config: Optional[Config] = None): + def __init__(self, config: Optional[Config] = None, boto3_session: Optional[boto3.session.Session] = None): """ Initialize the SSM Parameter Store client """ config = config or Config() - self.client = boto3.client("ssm", config=config) + session = boto3_session or boto3.session.Session() + self.client = session.client("ssm", config=config) super().__init__() diff --git a/aws_lambda_powertools/utilities/validation/base.py b/aws_lambda_powertools/utilities/validation/base.py index 13deb4d24e2..61d692d7f28 100644 --- a/aws_lambda_powertools/utilities/validation/base.py +++ b/aws_lambda_powertools/utilities/validation/base.py @@ -32,6 +32,15 @@ def validate_data_against_schema(data: Union[Dict, str], schema: Dict, formats: fastjsonschema.validate(definition=schema, data=data, formats=formats) except (TypeError, AttributeError, fastjsonschema.JsonSchemaDefinitionException) as e: raise InvalidSchemaFormatError(f"Schema received: {schema}, Formats: {formats}. 
Error: {e}") - except fastjsonschema.JsonSchemaException as e: - message = f"Failed schema validation. Error: {e.message}, Path: {e.path}, Data: {e.value}" # noqa: B306, E501 - raise SchemaValidationError(message) + except fastjsonschema.JsonSchemaValueException as e: + message = f"Failed schema validation. Error: {e.message}, Path: {e.path}, Data: {e.value}" # noqa: B306 + raise SchemaValidationError( + message, + validation_message=e.message, # noqa: B306 + name=e.name, + path=e.path, + value=e.value, + definition=e.definition, + rule=e.rule, + rule_definition=e.rule_definition, + ) diff --git a/aws_lambda_powertools/utilities/validation/exceptions.py b/aws_lambda_powertools/utilities/validation/exceptions.py index d4aaa500ec7..7c719ca3119 100644 --- a/aws_lambda_powertools/utilities/validation/exceptions.py +++ b/aws_lambda_powertools/utilities/validation/exceptions.py @@ -1,9 +1,59 @@ +from typing import Any, List, Optional + from ...exceptions import InvalidEnvelopeExpressionError class SchemaValidationError(Exception): """When serialization fail schema validation""" + def __init__( + self, + message: str, + validation_message: Optional[str] = None, + name: Optional[str] = None, + path: Optional[List] = None, + value: Optional[Any] = None, + definition: Optional[Any] = None, + rule: Optional[str] = None, + rule_definition: Optional[Any] = None, + ): + """ + + Parameters + ---------- + message : str + Powertools formatted error message + validation_message : str, optional + Containing human-readable information what is wrong + (e.g. `data.property[index] must be smaller than or equal to 42`) + name : str, optional + name of a path in the data structure + (e.g. `data.property[index]`) + path: List, optional + `path` as an array in the data structure + (e.g. `['data', 'property', 'index']`), + value : Any, optional + The invalid value + definition : Any, optional + The full rule `definition` + (e.g. `42`) + rule : str, optional + `rule` which the `data` is breaking + (e.g. `maximum`) + rule_definition : Any, optional + The specific rule `definition` + (e.g. `42`) + """ + super().__init__(message) + self.message = message + self.validation_message = validation_message + self.name = name + self.path = path + self.value = value + self.definition = definition + self.rule = rule + self.rule_definition = rule_definition + class InvalidSchemaFormatError(Exception): """When JSON Schema is in invalid format""" diff --git a/aws_lambda_powertools/utilities/validation/validator.py b/aws_lambda_powertools/utilities/validation/validator.py index d9ce35fe41b..aab383eeb45 100644 --- a/aws_lambda_powertools/utilities/validation/validator.py +++ b/aws_lambda_powertools/utilities/validation/validator.py @@ -1,8 +1,9 @@ import logging from typing import Any, Callable, Dict, Optional, Union +from aws_lambda_powertools.utilities import jmespath_utils + from ...middleware_factory import lambda_handler_decorator -from ...shared import jmespath_utils from .base import validate_data_against_schema logger = logging.getLogger(__name__) diff --git a/docs/core/event_handler/api_gateway.md b/docs/core/event_handler/api_gateway.md index 76a72fd03c7..aeaa75e0d2a 100644 --- a/docs/core/event_handler/api_gateway.md +++ b/docs/core/event_handler/api_gateway.md @@ -287,6 +287,8 @@ You can use `/path/{dynamic_value}` when configuring dynamic URL paths. This all } ``` +#### Nested routes + You can also nest paths as configured earlier in [our sample infrastructure](#required-resources): `/{message}/{name}`. 
=== "app.py" @@ -323,6 +325,42 @@ You can also nest paths as configured earlier in [our sample infrastructure](#re } ``` +#### Catch-all routes + +!!! note "We recommend having explicit routes whenever possible; use catch-all routes sparingly" + +You can use a regex string to handle an arbitrary number of paths within a request, for example `.+`. + +You can also combine nested paths with greedy regex to catch in between routes. + +!!! warning "We will choose the more explicit registered route that match incoming event" + +=== "app.py" + + ```python hl_lines="5" + from aws_lambda_powertools.event_handler.api_gateway import ApiGatewayResolver + + app = ApiGatewayResolver() + + @app.get(".+") + def catch_any_route_after_any(): + return {"path_received": app.current_event.path} + + def lambda_handler(event, context): + return app.resolve(event, context) + ``` + +=== "sample_request.json" + + ```json + { + "resource": "/any/route/should/work", + "path": "/any/route/should/work", + "httpMethod": "GET", + ... + } + ``` + ### Accessing request details By integrating with [Data classes utilities](../../utilities/data_classes.md){target="_blank"}, you have access to request details, Lambda context and also some convenient methods. diff --git a/docs/index.md b/docs/index.md index 781a96e2eb3..b79a2e7074f 100644 --- a/docs/index.md +++ b/docs/index.md @@ -124,6 +124,54 @@ If using SAM, you can include this SAR App as part of your shared Layers stack, ) ``` +=== "Terraform" + + > Credits to [Dani Comnea](https://github.com/DanyC97) for providing the Terraform equivalent. + + ```terraform hl_lines="12-13 15-20 23-25 40" + terraform { + required_version = "~> 0.13" + required_providers { + aws = "~> 3.50.0" + } + } + + provider "aws" { + region = "us-east-1" + } + + resource "aws_serverlessapplicationrepository_cloudformation_stack" "deploy_sar_stack" { + name = "aws-lambda-powertools-python-layer" + + application_id = data.aws_serverlessapplicationrepository_application.sar_app.application_id + semantic_version = data.aws_serverlessapplicationrepository_application.sar_app.semantic_version + capabilities = [ + "CAPABILITY_IAM", + "CAPABILITY_NAMED_IAM" + ] + } + + data "aws_serverlessapplicationrepository_application" "sar_app" { + application_id = "arn:aws:serverlessrepo:eu-west-1:057560766410:applications/aws-lambda-powertools-python-layer" + semantic_version = var.aws_powertools_version + } + + variable "aws_powertools_version" { + type = string + default = "1.20.2" + description = "The AWS Powertools release version" + } + + output "deployed_powertools_sar_version" { + value = data.aws_serverlessapplicationrepository_application.sar_app.semantic_version + } + + # Fetch Lambda Powertools Layer ARN from deployed SAR App + output "aws_lambda_powertools_layer_arn" { + value = aws_serverlessapplicationrepository_cloudformation_stack.deploy_sar_stack.outputs.LayerVersionArn + } + ``` + ??? tip "Example of least-privileged IAM permissions to deploy Layer" > Credits to [mwarkentin](https://github.com/mwarkentin) for providing the scoped down IAM permissions. 
diff --git a/docs/utilities/batch.md b/docs/utilities/batch.md index 96770fb1849..56ab160e9f9 100644 --- a/docs/utilities/batch.md +++ b/docs/utilities/batch.md @@ -143,10 +143,13 @@ Use `PartialSQSProcessor` context manager to access a list of all return values return result ``` -### Passing custom boto3 config +### Customizing boto configuration -If you need to pass custom configuration such as region to the SDK, you can pass your own [botocore config object](https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html) to -the `sqs_batch_processor` decorator: +The **`config`** and **`boto3_session`** parameters enable you to pass in a custom [botocore config object](https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html) +or a custom [boto3 session](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html) when using the `sqs_batch_processor` +decorator or `PartialSQSProcessor` class. + +> Custom config example === "Decorator" @@ -193,6 +196,53 @@ the `sqs_batch_processor` decorator: return result ``` +> Custom boto3 session example + +=== "Decorator" + + ```python hl_lines="4 12" + from aws_lambda_powertools.utilities.batch import sqs_batch_processor + from botocore.config import Config + + session = boto3.session.Session() + + def record_handler(record): + # This will be called for each individual message from a batch + # It should raise an exception if the message was not processed successfully + return_value = do_something_with(record["body"]) + return return_value + + @sqs_batch_processor(record_handler=record_handler, boto3_session=session) + def lambda_handler(event, context): + return {"statusCode": 200} + ``` + +=== "Context manager" + + ```python hl_lines="4 16" + from aws_lambda_powertools.utilities.batch import PartialSQSProcessor + import boto3 + + session = boto3.session.Session() + + def record_handler(record): + # This will be called for each individual message from a batch + # It should raise an exception if the message was not processed successfully + return_value = do_something_with(record["body"]) + return return_value + + + def lambda_handler(event, context): + records = event["Records"] + + processor = PartialSQSProcessor(boto3_session=session) + + with processor(records, record_handler): + result = processor.process() + + return result + ``` + ### Suppressing exceptions If you want to disable the default behavior where `SQSBatchProcessingError` is raised if there are any errors, you can pass the `suppress_exception` boolean argument. diff --git a/docs/utilities/data_classes.md b/docs/utilities/data_classes.md index 6cd487a2092..e05193c7702 100644 --- a/docs/utilities/data_classes.md +++ b/docs/utilities/data_classes.md @@ -96,9 +96,10 @@ Use **`APIGatewayAuthorizerRequestEvent`** for type `REQUEST` and **`APIGatewayA When the user is found, it includes the user details in the request context that will be available to the back-end, and returns a full access policy for admin users. 
- ```python hl_lines="2-5 26-31 36-37 40 44 46" + ```python hl_lines="2-6 29 36-42 47 49" from aws_lambda_powertools.utilities.data_classes import event_source from aws_lambda_powertools.utilities.data_classes.api_gateway_authorizer_event import ( + DENY_ALL_RESPONSE, APIGatewayAuthorizerRequestEvent, APIGatewayAuthorizerResponse, HttpVerb, @@ -108,9 +109,9 @@ Use **`APIGatewayAuthorizerRequestEvent`** for type `REQUEST` and **`APIGatewayA def get_user_by_token(token): if compare_digest(token, "admin-foo"): - return {"isAdmin": True, "name": "Admin"} + return {"id": 0, "name": "Admin", "isAdmin": True} elif compare_digest(token, "regular-foo"): - return {"name": "Joe"} + return {"id": 1, "name": "Joe"} else: return None @@ -119,25 +120,27 @@ Use **`APIGatewayAuthorizerRequestEvent`** for type `REQUEST` and **`APIGatewayA def handler(event: APIGatewayAuthorizerRequestEvent, context): user = get_user_by_token(event.get_header_value("Authorization")) + if user is None: + # No user was found + # to return 401 - `{"message":"Unauthorized"}`, but pollutes lambda error count metrics + # raise Exception("Unauthorized") + # to return 403 - `{"message":"Forbidden"}` + return DENY_ALL_RESPONSE + # parse the `methodArn` as an `APIGatewayRouteArn` arn = event.parsed_arn + # Create the response builder from parts of the `methodArn` + # and set the logged in user id and context policy = APIGatewayAuthorizerResponse( - principal_id="user", + principal_id=user["id"], + context=user, region=arn.region, aws_account_id=arn.aws_account_id, api_id=arn.api_id, - stage=arn.stage + stage=arn.stage, ) - if user is None: - # No user was found, so we return not authorized - policy.deny_all_routes() - return policy.asdict() - - # Found the user and setting the details in the context - policy.context = user - # Conditional IAM Policy if user.get("isAdmin", False): policy.allow_all_routes() @@ -299,7 +302,7 @@ In this example extract the `requestId` as the `correlation_id` for logging, use if not user: # No user found, return not authorized - return AppSyncAuthorizerResponse().to_dict() + return AppSyncAuthorizerResponse().asdict() return AppSyncAuthorizerResponse( authorize=True, diff --git a/docs/utilities/feature_flags.md b/docs/utilities/feature_flags.md index d22f9c03296..816aac8b817 100644 --- a/docs/utilities/feature_flags.md +++ b/docs/utilities/feature_flags.md @@ -366,7 +366,7 @@ You can use `get_enabled_features` method for scenarios where you need a list of "when_match": true, "conditions": [ { - "action": "IN", + "action": "KEY_IN_VALUE", "key": "CloudFront-Viewer-Country", "value": ["NL", "IE", "UK", "PL", "PT"] } @@ -450,9 +450,25 @@ The `conditions` block is a list of conditions that contain `action`, `key`, and } ``` -The `action` configuration can have 5 different values: `EQUALS`, `STARTSWITH`, `ENDSWITH`, `IN`, `NOT_IN`. +The `action` configuration can have the following values, where the expressions **`a`** is the `key` and **`b`** is the `value` above: -The `key` and `value` will be compared to the input from the context parameter. 
+Action | Equivalent expression +------------------------------------------------- | --------------------------------------------------------------------------------- +**EQUALS** | `lambda a, b: a == b` +**NOT_EQUALS** | `lambda a, b: a != b` +**KEY_GREATER_THAN_VALUE** | `lambda a, b: a > b` +**KEY_GREATER_THAN_OR_EQUAL_VALUE** | `lambda a, b: a >= b` +**KEY_LESS_THAN_VALUE** | `lambda a, b: a < b` +**KEY_LESS_THAN_OR_EQUAL_VALUE** | `lambda a, b: a <= b` +**STARTSWITH** | `lambda a, b: a.startswith(b)` +**ENDSWITH** | `lambda a, b: a.endswith(b)` +**KEY_IN_VALUE** | `lambda a, b: a in b` +**KEY_NOT_IN_VALUE** | `lambda a, b: a not in b` +**VALUE_IN_KEY** | `lambda a, b: b in a` +**VALUE_NOT_IN_KEY** | `lambda a, b: b not in a` + + +!!! info "The `**key**` and `**value**` will be compared to the input from the `**context**` parameter." **For multiple conditions**, we will evaluate the list of conditions as a logical `AND`, so all conditions needs to match to return `when_match` value. @@ -529,6 +545,27 @@ For this to work, you need to use a JMESPath expression via the `envelope` param } ``` +### Getting fetched configuration + +You can access the configuration fetched from the store via `get_raw_configuration` property within the store instance. + +=== "app.py" + + ```python hl_lines="12" + from aws_lambda_powertools.utilities.feature_flags import FeatureFlags, AppConfigStore + + app_config = AppConfigStore( + environment="dev", + application="product-catalogue", + name="configuration", + envelope = "feature_flags" + ) + + feature_flags = FeatureFlags(store=app_config) + + config = app_config.get_raw_configuration + ``` + ### Built-in store provider !!! info "For GA, you'll be able to bring your own store." @@ -548,6 +585,7 @@ Parameter | Default | Description **max_age** | `5` | Number of seconds to cache feature flags configuration fetched from AWS AppConfig **sdk_config** | `None` | [Botocore Config object](https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html){target="_blank"} **jmespath_options** | `None` | For advanced use cases when you want to bring your own [JMESPath functions](https://github.com/jmespath/jmespath.py#custom-functions){target="_blank"} +**logger** | `logging.Logger` | Logger to use for debug. You can optionally supply an instance of Powertools Logger. === "appconfig_store_example.py" @@ -650,3 +688,11 @@ Method | When to use | Requires new deployment on changes | Supported services **[Environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html){target="_blank"}** | Simple configuration that will rarely if ever change, because changing it requires a Lambda function deployment. | Yes | Lambda **[Parameters utility](parameters.md)** | Access to secrets, or fetch parameters in different formats from AWS System Manager Parameter Store or Amazon DynamoDB. | No | Parameter Store, DynamoDB, Secrets Manager, AppConfig **Feature flags utility** | Rule engine to define when one or multiple features should be enabled depending on the input. 
| No | AppConfig + + +## Deprecation list when GA + +Breaking change | Recommendation +------------------------------------------------- | --------------------------------------------------------------------------------- +`IN` RuleAction | Use `KEY_IN_VALUE` instead +`NOT_IN` RuleAction | Use `KEY_NOT_IN_VALUE` instead diff --git a/docs/utilities/idempotency.md b/docs/utilities/idempotency.md index 495fe626d4f..43eb1ac3a0b 100644 --- a/docs/utilities/idempotency.md +++ b/docs/utilities/idempotency.md @@ -206,6 +206,11 @@ In this example, we have a Lambda handler that creates a payment for a user subs Imagine the function executes successfully, but the client never receives the response due to a connection issue. It is safe to retry in this instance, as the idempotent decorator will return a previously saved response. +!!! warning "Idempotency for JSON payloads" + The payload extracted by the `event_key_jmespath` is treated as a string by default, so will be sensitive to differences in whitespace even when the JSON payload itself is identical. + + To alter this behaviour, we can use the [JMESPath built-in function](jmespath_functions.md#powertools_json-function) `powertools_json()` to treat the payload as a JSON object rather than a string. + === "payment.py" ```python hl_lines="2-4 10 12 15 20" @@ -218,7 +223,7 @@ Imagine the function executes successfully, but the client never receives the re # Treat everything under the "body" key # in the event json object as our payload - config = IdempotencyConfig(event_key_jmespath="body") + config = IdempotencyConfig(event_key_jmespath="powertools_json(body)") @idempotent(config=config, persistence_store=persistence_layer) def handler(event, context): @@ -270,6 +275,7 @@ Imagine the function executes successfully, but the client never receives the re } ``` + ### Idempotency request flow This sequence diagram shows an example flow of what happens in the payment scenario: @@ -334,7 +340,7 @@ Idempotent decorator can be further configured with **`IdempotencyConfig`** as s Parameter | Default | Description ------------------------------------------------- | ------------------------------------------------- | --------------------------------------------------------------------------------- -**event_key_jmespath** | `""` | JMESPath expression to extract the idempotency key from the event record +**event_key_jmespath** | `""` | JMESPath expression to extract the idempotency key from the event record using [built-in functions](/utilities/jmespath_functions) **payload_validation_jmespath** | `""` | JMESPath expression to validate whether certain parameters have changed in the event while the event payload **raise_on_no_idempotency_key** | `False` | Raise exception if no idempotency key was found in the request **expires_after_seconds** | 3600 | The number of seconds to wait before a record is expired @@ -542,7 +548,7 @@ This means that we will raise **`IdempotencyKeyError`** if the evaluation of **` ### Customizing boto configuration -You can provide a custom boto configuration via **`boto_config`**, or an existing boto session via **`boto3_session`** parameters, when constructing the persistence store. +The **`boto_config`** and **`boto3_session`** parameters enable you to pass in a custom [botocore config object](https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html) or a custom [boto3 session](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html) when constructing the persistence store. 
=== "Custom session" @@ -759,6 +765,123 @@ The idempotency utility can be used with the `validator` decorator. Ensure that !!! tip "JMESPath Powertools functions are also available" Built-in functions known in the validation utility like `powertools_json`, `powertools_base64`, `powertools_base64_gzip` are also available to use in this utility. + +## Testing your code + +The idempotency utility provides several routes to test your code. + +### Disabling the idempotency utility +When testing your code, you may wish to disable the idempotency logic altogether and focus on testing your business logic. To do this, you can set the environment variable `POWERTOOLS_IDEMPOTENCY_DISABLED` +with a truthy value. If you prefer setting this for specific tests, and are using Pytest, you can use [monkeypatch](https://docs.pytest.org/en/latest/monkeypatch.html) fixture: + +=== "tests.py" + + ```python hl_lines="2 3" + def test_idempotent_lambda_handler(monkeypatch): + # Set POWERTOOLS_IDEMPOTENCY_DISABLED before calling decorated functions + monkeypatch.setenv("POWERTOOLS_IDEMPOTENCY_DISABLED", 1) + + result = handler() + ... + ``` +=== "app.py" + + ```python + from aws_lambda_powertools.utilities.idempotency import ( + DynamoDBPersistenceLayer, idempotent + ) + + persistence_layer = DynamoDBPersistenceLayer(table_name="idempotency") + + @idempotent(persistence_store=persistence_layer) + def handler(event, context): + print('expensive operation') + return { + "payment_id": 12345, + "message": "success", + "statusCode": 200, + } + ``` + +### Testing with DynamoDB Local + +To test with [DynamoDB Local](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.DownloadingAndRunning.html), you can replace the `Table` resource used by the persistence layer with one you create inside your tests. This allows you to set the endpoint_url. + +=== "tests.py" + + ```python hl_lines="6 7 8" + import boto3 + + import app + + def test_idempotent_lambda(): + # Create our own Table resource using the endpoint for our DynamoDB Local instance + resource = boto3.resource("dynamodb", endpoint_url='http://localhost:8000') + table = resource.Table(app.persistence_layer.table_name) + app.persistence_layer.table = table + + result = app.handler({'testkey': 'testvalue'}, {}) + assert result['payment_id'] == 12345 + ``` + +=== "app.py" + + ```python + from aws_lambda_powertools.utilities.idempotency import ( + DynamoDBPersistenceLayer, idempotent + ) + + persistence_layer = DynamoDBPersistenceLayer(table_name="idempotency") + + @idempotent(persistence_store=persistence_layer) + def handler(event, context): + print('expensive operation') + return { + "payment_id": 12345, + "message": "success", + "statusCode": 200, + } + ``` + +### How do I mock all DynamoDB I/O operations + +The idempotency utility lazily creates the dynamodb [Table](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#table) which it uses to access DynamoDB. +This means it is possible to pass a mocked Table resource, or stub various methods. + +=== "tests.py" + + ```python hl_lines="6 7 8 9" + from unittest.mock import MagicMock + + import app + + def test_idempotent_lambda(): + table = MagicMock() + app.persistence_layer.table = table + result = app.handler({'testkey': 'testvalue'}, {}) + table.put_item.assert_called() + ... 
+ ``` + +=== "app.py" + + ```python + from aws_lambda_powertools.utilities.idempotency import ( + DynamoDBPersistenceLayer, idempotent + ) + + persistence_layer = DynamoDBPersistenceLayer(table_name="idempotency") + + @idempotent(persistence_store=persistence_layer) + def handler(event, context): + print('expensive operation') + return { + "payment_id": 12345, + "message": "success", + "statusCode": 200, + } + ``` + ## Extra resources If you're interested in a deep dive on how Amazon uses idempotency when building our APIs, check out diff --git a/docs/utilities/jmespath_functions.md b/docs/utilities/jmespath_functions.md new file mode 100644 index 00000000000..583357a55e2 --- /dev/null +++ b/docs/utilities/jmespath_functions.md @@ -0,0 +1,267 @@ +--- +title: JMESPath Functions +description: Utility +--- + +!!! tip "JMESPath is a query language for JSON used by AWS CLI, AWS Python SDK, and AWS Lambda Powertools for Python." + +Built-in [JMESPath](https://jmespath.org/){target="_blank"} Functions to easily deserialize common encoded JSON payloads in Lambda functions. + +## Key features + +* Deserialize JSON from JSON strings, base64, and compressed data +* Use JMESPath to extract and combine data recursively + +## Getting started + +You might have events that contains encoded JSON payloads as string, base64, or even in compressed format. It is a common use case to decode and extract them partially or fully as part of your Lambda function invocation. + +Lambda Powertools also have utilities like [validation](validation.md), [idempotency](idempotency.md), or [feature flags](feature_flags.md) where you might need to extract a portion of your data before using them. + +!!! info "**Envelope** is the terminology we use for the JMESPath expression to extract your JSON object from your data input" + +### Extracting data + +You can use the `extract_data_from_envelope` function along with any [JMESPath expression](https://jmespath.org/tutorial.html){target="_blank"}. + +=== "app.py" + + ```python hl_lines="1 7" + from aws_lambda_powertools.utilities.jmespath_utils import extract_data_from_envelope + + from aws_lambda_powertools.utilities.typing import LambdaContext + + + def handler(event: dict, context: LambdaContext): + payload = extract_data_from_envelope(data=event, envelope="powertools_json(body)") + customer = payload.get("customerId") # now deserialized + ... + ``` + +=== "event.json" + + ```json + { + "body": "{\"customerId\":\"dd4649e6-2484-4993-acb8-0f9123103394\"}" + } + ``` + +### Built-in envelopes + +We provide built-in envelopes for popular JMESPath expressions used when looking to decode/deserialize JSON objects within AWS Lambda Event Sources. + +=== "app.py" + + ```python hl_lines="1 7" + from aws_lambda_powertools.utilities.jmespath_utils import extract_data_from_envelope, envelopes + + from aws_lambda_powertools.utilities.typing import LambdaContext + + + def handler(event: dict, context: LambdaContext): + payload = extract_data_from_envelope(data=event, envelope=envelopes.SNS) + customer = payload.get("customerId") # now deserialized + ... 
+ ``` + +=== "event.json" + + ```json hl_lines="6" + { + "Records": [ + { + "messageId": "19dd0b57-b21e-4ac1-bd88-01bbb068cb78", + "receiptHandle": "MessageReceiptHandle", + "body": "{\"customerId\":\"dd4649e6-2484-4993-acb8-0f9123103394\",\"booking\":{\"id\":\"5b2c4803-330b-42b7-811a-c68689425de1\",\"reference\":\"ySz7oA\",\"outboundFlightId\":\"20c0d2f2-56a3-4068-bf20-ff7703db552d\"},\"payment\":{\"receipt\":\"https:\/\/pay.stripe.com\/receipts\/acct_1Dvn7pF4aIiftV70\/ch_3JTC14F4aIiftV700iFq2CHB\/rcpt_K7QsrFln9FgFnzUuBIiNdkkRYGxUL0X\",\"amount\":100}}", + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1523232000000", + "SenderId": "123456789012", + "ApproximateFirstReceiveTimestamp": "1523232000001" + }, + "messageAttributes": {}, + "md5OfBody": "7b270e59b47ff90a553787216d55d91d", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:us-east-1:123456789012:MyQueue", + "awsRegion": "us-east-1" + } + ] + } + ``` + +These are all built-in envelopes you can use along with their expression as a reference: + +Envelope | JMESPath expression +------------------------------------------------- | --------------------------------------------------------------------------------- +**`API_GATEWAY_REST`** | `powertools_json(body)` +**`API_GATEWAY_HTTP`** | `API_GATEWAY_REST` +**`SQS`** | `Records[*].powertools_json(body)` +**`SNS`** | `Records[0].Sns.Message | powertools_json(@)` +**`EVENTBRIDGE`** | `detail` +**`CLOUDWATCH_EVENTS_SCHEDULED`** | `EVENTBRIDGE` +**`KINESIS_DATA_STREAM`** | `Records[*].kinesis.powertools_json(powertools_base64(data))` +**`CLOUDWATCH_LOGS`** | `awslogs.powertools_base64_gzip(data) | powertools_json(@).logEvents[*]` + +## Advanced + +### Built-in JMESPath functions +You can use our built-in JMESPath functions within your expressions to do exactly that to decode JSON Strings, base64, and uncompress gzip data. + +!!! info + We use these for built-in envelopes to easily decode and unwrap events from sources like API Gateway, Kinesis, CloudWatch Logs, etc. + +#### powertools_json function + +Use `powertools_json` function to decode any JSON String anywhere a JMESPath expression is allowed. + +> **Validation scenario** + +This sample will decode the value within the `data` key into a valid JSON before we can validate it. + +=== "powertools_json_jmespath_function.py" + + ```python hl_lines="9" + from aws_lambda_powertools.utilities.validation import validate + + import schemas + + sample_event = { + 'data': '{"payload": {"message": "hello hello", "username": "blah blah"}}' + } + + validate(event=sample_event, schema=schemas.INPUT, envelope="powertools_json(data)") + ``` + +=== "schemas.py" + + ```python hl_lines="7 14 16 23 39 45 47 52" + --8<-- "docs/shared/validation_basic_jsonschema.py" + ``` + +> **Idempotency scenario** + +This sample will decode the value within the `body` key of an API Gateway event into a valid JSON object to ensure the Idempotency utility processes a JSON object instead of a string. 
+ +=== "powertools_json_jmespath_function.py" + + ```python hl_lines="8" + import json + from aws_lambda_powertools.utilities.idempotency import ( + IdempotencyConfig, DynamoDBPersistenceLayer, idempotent + ) + + persistence_layer = DynamoDBPersistenceLayer(table_name="IdempotencyTable") + + config = IdempotencyConfig(event_key_jmespath="powertools_json(body)") + @idempotent(config=config, persistence_store=persistence_layer) + def handler(event:APIGatewayProxyEvent, context): + body = json.loads(event['body']) + payment = create_subscription_payment( + user=body['user'], + product=body['product_id'] + ) + ... + return { + "payment_id": payment.id, + "message": "success", + "statusCode": 200 + } + ``` + +#### powertools_base64 function + +Use `powertools_base64` function to decode any base64 data. + +This sample will decode the base64 value within the `data` key, and decode the JSON string into a valid JSON before we can validate it. + +=== "powertools_json_jmespath_function.py" + + ```python hl_lines="12" + from aws_lambda_powertools.utilities.validation import validate + + import schemas + + sample_event = { + "data": "eyJtZXNzYWdlIjogImhlbGxvIGhlbGxvIiwgInVzZXJuYW1lIjogImJsYWggYmxhaCJ9=" + } + + validate( + event=sample_event, + schema=schemas.INPUT, + envelope="powertools_json(powertools_base64(data))" + ) + ``` + +=== "schemas.py" + + ```python hl_lines="7 14 16 23 39 45 47 52" + --8<-- "docs/shared/validation_basic_jsonschema.py" + ``` + +#### powertools_base64_gzip function + +Use `powertools_base64_gzip` function to decompress and decode base64 data. + +This sample will decompress and decode base64 data, then use JMESPath pipeline expression to pass the result for decoding its JSON string. + +=== "powertools_json_jmespath_function.py" + + ```python hl_lines="12" + from aws_lambda_powertools.utilities.validation import validate + + import schemas + + sample_event = { + "data": "H4sIACZAXl8C/52PzUrEMBhFX2UILpX8tPbHXWHqIOiq3Q1F0ubrWEiakqTWofTdTYYB0YWL2d5zvnuTFellBIOedoiyKH5M0iwnlKH7HZL6dDB6ngLDfLFYctUKjie9gHFaS/sAX1xNEq525QxwFXRGGMEkx4Th491rUZdV3YiIZ6Ljfd+lfSyAtZloacQgAkqSJCGhxM6t7cwwuUGPz4N0YKyvO6I9WDeMPMSo8Z4Ca/kJ6vMEYW5f1MX7W1lVxaG8vqX8hNFdjlc0iCBBSF4ERT/3Pl7RbMGMXF2KZMh/C+gDpNS7RRsp0OaRGzx0/t8e0jgmcczyLCWEePhni/23JWalzjdu0a3ZvgEaNLXeugEAAA==" + } + + validate( + event=sample_event, + schema=schemas.INPUT, + envelope="powertools_base64_gzip(data) | powertools_json(@)" + ) + ``` + +=== "schemas.py" + + ```python hl_lines="7 14 16 23 39 45 47 52" + --8<-- "docs/shared/validation_basic_jsonschema.py" + ``` + +### Bring your own JMESPath function + +!!! warning + This should only be used for advanced use cases where you have special formats not covered by the built-in functions. + +For special binary formats that you want to decode before applying JSON Schema validation, you can bring your own [JMESPath function](https://github.com/jmespath/jmespath.py#custom-functions){target="_blank"} and any additional option via `jmespath_options` param. 
+ +In order to keep the built-in functions from Powertools, you can subclass from `PowertoolsFunctions`: + +=== "custom_jmespath_function.py" + + ```python hl_lines="2-3 6-9 11 17" + from aws_lambda_powertools.utilities.jmespath_utils import ( + PowertoolsFunctions, extract_data_from_envelope) + from jmespath.functions import signature + + + class CustomFunctions(PowertoolsFunctions): + @signature({'types': ['string']}) # Only decode if value is a string + def _func_special_decoder(self, s): + return my_custom_decoder_logic(s) + + custom_jmespath_options = {"custom_functions": CustomFunctions()} + + def handler(event, context): + # use the custom name after `_func_` + extract_data_from_envelope(data=event, + envelope="special_decoder(body)", + jmespath_options=**custom_jmespath_options) + ... + ``` + +=== "event.json" + + ```json + {"body": "custom_encoded_data"} + ``` diff --git a/docs/utilities/parameters.md b/docs/utilities/parameters.md index 081d22817ab..51fd0196abd 100644 --- a/docs/utilities/parameters.md +++ b/docs/utilities/parameters.md @@ -501,3 +501,40 @@ Here is the mapping between this utility's functions and methods and the underly | DynamoDB | `DynamoDBProvider.get` | `dynamodb` | ([Table resource](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#table)) | [get_item](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Table.get_item) | DynamoDB | `DynamoDBProvider.get_multiple` | `dynamodb` | ([Table resource](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#table)) | [query](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Table.query) | App Config | `get_app_config` | `appconfig` | [get_configuration](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/appconfig.html#AppConfig.Client.get_configuration) | + + +### Customizing boto configuration + +The **`config`** and **`boto3_session`** parameters enable you to pass in a custom [botocore config object](https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html) or a custom [boto3 session](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html) when constructing any of the built-in provider classes. + +> **Example** + + +=== "Custom session" + + ```python hl_lines="2 4 5" + from aws_lambda_powertools.utilities import parameters + import boto3 + + boto3_session = boto3.session.Session() + ssm_provider = parameters.SSMProvider(boto3_session=boto3_session) + + def handler(event, context): + # Retrieve a single parameter + value = ssm_provider.get("/my/parameter") + ... + ``` +=== "Custom config" + + ```python hl_lines="2 4 5" + from aws_lambda_powertools.utilities import parameters + from botocore.config import Config + + boto_config = Config() + ssm_provider = parameters.SSMProvider(config=boto_config) + + def handler(event, context): + # Retrieve a single parameter + value = ssm_provider.get("/my/parameter") + ... 
+ ``` diff --git a/docs/utilities/parser.md b/docs/utilities/parser.md index 47f87e355bb..9f1bed3c0cb 100644 --- a/docs/utilities/parser.md +++ b/docs/utilities/parser.md @@ -448,7 +448,7 @@ Alternatively, you can pass `'*'` as an argument for the decorator so that you c === "validate_all_field_values.py" ```python - from aws_lambda_powertools.utilities.parser import parse, BaseModel, validator + from aws_lambda_powertools.utilities.parser import parse, BaseModel, root_validator class UserModel(BaseModel): username: str diff --git a/docs/utilities/validation.md b/docs/utilities/validation.md index 7df339b7503..73f1e085164 100644 --- a/docs/utilities/validation.md +++ b/docs/utilities/validation.md @@ -429,129 +429,7 @@ For each format defined in a dictionary key, you must use a regex, or a function You might have events or responses that contain non-encoded JSON, where you need to decode before validating them. -You can use our built-in JMESPath functions within your expressions to do exactly that to decode JSON Strings, base64, and uncompress gzip data. +You can use our built-in [JMESPath functions](/utilities/jmespath_functions) within your expressions to do exactly that to decode JSON Strings, base64, and uncompress gzip data. !!! info We use these for built-in envelopes to easily to decode and unwrap events from sources like Kinesis, CloudWatch Logs, etc. - -#### powertools_json function - -Use `powertools_json` function to decode any JSON String. - -This sample will decode the value within the `data` key into a valid JSON before we can validate it. - -=== "powertools_json_jmespath_function.py" - - ```python hl_lines="9" - from aws_lambda_powertools.utilities.validation import validate - - import schemas - - sample_event = { - 'data': '{"payload": {"message": "hello hello", "username": "blah blah"}}' - } - - validate(event=sample_event, schema=schemas.INPUT, envelope="powertools_json(data)") - ``` - -=== "schemas.py" - - ```python hl_lines="7 14 16 23 39 45 47 52" - --8<-- "docs/shared/validation_basic_jsonschema.py" - ``` - -#### powertools_base64 function - -Use `powertools_base64` function to decode any base64 data. - -This sample will decode the base64 value within the `data` key, and decode the JSON string into a valid JSON before we can validate it. - -=== "powertools_json_jmespath_function.py" - - ```python hl_lines="12" - from aws_lambda_powertools.utilities.validation import validate - - import schemas - - sample_event = { - "data": "eyJtZXNzYWdlIjogImhlbGxvIGhlbGxvIiwgInVzZXJuYW1lIjogImJsYWggYmxhaCJ9=" - } - - validate( - event=sample_event, - schema=schemas.INPUT, - envelope="powertools_json(powertools_base64(data))" - ) - ``` - -=== "schemas.py" - - ```python hl_lines="7 14 16 23 39 45 47 52" - --8<-- "docs/shared/validation_basic_jsonschema.py" - ``` - -#### powertools_base64_gzip function - -Use `powertools_base64_gzip` function to decompress and decode base64 data. - -This sample will decompress and decode base64 data, then use JMESPath pipeline expression to pass the result for decoding its JSON string. 
- -=== "powertools_json_jmespath_function.py" - - ```python hl_lines="12" - from aws_lambda_powertools.utilities.validation import validate - - import schemas - - sample_event = { - "data": "H4sIACZAXl8C/52PzUrEMBhFX2UILpX8tPbHXWHqIOiq3Q1F0ubrWEiakqTWofTdTYYB0YWL2d5zvnuTFellBIOedoiyKH5M0iwnlKH7HZL6dDB6ngLDfLFYctUKjie9gHFaS/sAX1xNEq525QxwFXRGGMEkx4Th491rUZdV3YiIZ6Ljfd+lfSyAtZloacQgAkqSJCGhxM6t7cwwuUGPz4N0YKyvO6I9WDeMPMSo8Z4Ca/kJ6vMEYW5f1MX7W1lVxaG8vqX8hNFdjlc0iCBBSF4ERT/3Pl7RbMGMXF2KZMh/C+gDpNS7RRsp0OaRGzx0/t8e0jgmcczyLCWEePhni/23JWalzjdu0a3ZvgEaNLXeugEAAA==" - } - - validate( - event=sample_event, - schema=schemas.INPUT, - envelope="powertools_base64_gzip(data) | powertools_json(@)" - ) - ``` - -=== "schemas.py" - - ```python hl_lines="7 14 16 23 39 45 47 52" - --8<-- "docs/shared/validation_basic_jsonschema.py" - ``` - -### Bring your own JMESPath function - -!!! warning - This should only be used for advanced use cases where you have special formats not covered by the built-in functions. - - This will **replace all provided built-in functions such as `powertools_json`, so you will no longer be able to use them**. - -For special binary formats that you want to decode before applying JSON Schema validation, you can bring your own [JMESPath function](https://github.com/jmespath/jmespath.py#custom-functions){target="_blank"} and any additional option via `jmespath_options` param. - -=== "custom_jmespath_function.py" - - ```python hl_lines="2 6-10 14" - from aws_lambda_powertools.utilities.validation import validator - from jmespath import functions - - import schemas - - class CustomFunctions(functions.Functions): - - @functions.signature({'types': ['string']}) - def _func_special_decoder(self, s): - return my_custom_decoder_logic(s) - - custom_jmespath_options = {"custom_functions": CustomFunctions()} - - @validator(schema=schemas.INPUT, jmespath_options=**custom_jmespath_options) - def handler(event, context): - return event - ``` - -=== "schemas.py" - - ```python hl_lines="7 14 16 23 39 45 47 52" - --8<-- "docs/shared/validation_basic_jsonschema.py" - ``` diff --git a/mkdocs.yml b/mkdocs.yml index 94dc9980cf1..b90ba4376de 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -26,6 +26,7 @@ nav: - utilities/parser.md - utilities/idempotency.md - utilities/feature_flags.md + - utilities/jmespath_functions.md theme: name: material diff --git a/mypy.ini b/mypy.ini index 2436d7074d2..faf6014a54d 100644 --- a/mypy.ini +++ b/mypy.ini @@ -11,6 +11,12 @@ show_error_context = True [mypy-jmespath] ignore_missing_imports=True +[mypy-jmespath.exceptions] +ignore_missing_imports=True + +[mypy-jmespath.functions] +ignore_missing_imports=True + [mypy-boto3] ignore_missing_imports = True diff --git a/poetry.lock b/poetry.lock index db58802632b..8e6806f9848 100644 --- a/poetry.lock +++ b/poetry.lock @@ -81,14 +81,14 @@ d = ["aiohttp (>=3.3.2)", "aiohttp-cors"] [[package]] name = "boto3" -version = "1.18.32" +version = "1.18.54" description = "The AWS SDK for Python" category = "main" optional = false python-versions = ">= 3.6" [package.dependencies] -botocore = ">=1.21.32,<1.22.0" +botocore = ">=1.21.54,<1.22.0" jmespath = ">=0.7.1,<1.0.0" s3transfer = ">=0.5.0,<0.6.0" @@ -97,7 +97,7 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.21.32" +version = "1.21.54" description = "Low-level, data-driven core of boto 3." 
category = "main" optional = false @@ -149,17 +149,17 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "coverage" -version = "5.5" +version = "6.0" description = "Code coverage measurement for Python" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" +python-versions = ">=3.6" [package.dependencies] -toml = {version = "*", optional = true, markers = "extra == \"toml\""} +tomli = {version = "*", optional = true, markers = "extra == \"toml\""} [package.extras] -toml = ["toml"] +toml = ["tomli"] [[package]] name = "dataclasses" @@ -244,7 +244,7 @@ toml = "*" [[package]] name = "flake8-bugbear" -version = "21.4.3" +version = "21.9.2" description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle." category = "dev" optional = false @@ -333,17 +333,6 @@ testfixtures = ">=6.8.0,<7" [package.extras] test = ["pytest (>=4.0.2,<6)", "toml"] -[[package]] -name = "flake8-polyfill" -version = "1.0.2" -description = "Polyfill package for Flake8 plugins" -category = "dev" -optional = false -python-versions = "*" - -[package.dependencies] -flake8 = "*" - [[package]] name = "flake8-variables-names" version = "0.0.4" @@ -372,7 +361,7 @@ python-versions = "*" python-dateutil = ">=2.8.1" [package.extras] -dev = ["twine", "markdown", "flake8"] +dev = ["twine", "markdown", "flake8", "wheel"] [[package]] name = "gitdb" @@ -588,7 +577,7 @@ mkdocs = ">=0.17" [[package]] name = "mkdocs-material" -version = "7.2.6" +version = "7.3.1" description = "A Material Design theme for MkDocs" category = "dev" optional = false @@ -599,7 +588,7 @@ markdown = ">=3.2" mkdocs = ">=1.2.2" mkdocs-material-extensions = ">=1.0" Pygments = ">=2.4" -pymdown-extensions = ">=7.0" +pymdown-extensions = ">=9.0" [[package]] name = "mkdocs-material-extensions" @@ -741,7 +730,7 @@ python-versions = ">=3.5" [[package]] name = "pymdown-extensions" -version = "8.2" +version = "9.0" description = "Extension pack for Python Markdown." category = "dev" optional = false @@ -796,16 +785,15 @@ testing = ["coverage", "hypothesis (>=5.7.1)"] [[package]] name = "pytest-cov" -version = "2.12.1" +version = "3.0.0" description = "Pytest plugin for measuring coverage." 
category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.6" [package.dependencies] -coverage = ">=5.2.1" +coverage = {version = ">=5.2.1", extras = ["toml"]} pytest = ">=4.6" -toml = "*" [package.extras] testing = ["fields", "hunter", "process-tests", "six", "pytest-xdist", "virtualenv"] @@ -856,7 +844,7 @@ pyyaml = "*" [[package]] name = "radon" -version = "4.5.2" +version = "5.1.0" description = "Code Metrics in Python" category = "dev" optional = false @@ -864,7 +852,6 @@ python-versions = "*" [package.dependencies] colorama = {version = ">=0.4.1", markers = "python_version > \"3.4\""} -flake8-polyfill = "*" future = "*" mando = ">=0.6,<0.7" @@ -980,6 +967,14 @@ category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +[[package]] +name = "tomli" +version = "1.2.1" +description = "A lil' TOML parser" +category = "dev" +optional = false +python-versions = ">=3.6" + [[package]] name = "typed-ast" version = "1.4.3" @@ -1030,7 +1025,7 @@ python-versions = "*" [[package]] name = "xenon" -version = "0.7.3" +version = "0.8.0" description = "Monitor code metrics for Python on your CI server" category = "dev" optional = false @@ -1038,7 +1033,7 @@ python-versions = "*" [package.dependencies] PyYAML = ">=4.2b1,<6.0" -radon = ">=4,<5" +radon = ">=4,<6" requests = ">=2.0,<3.0" [[package]] @@ -1059,7 +1054,7 @@ pydantic = ["pydantic", "email-validator"] [metadata] lock-version = "1.1" python-versions = "^3.6.1" -content-hash = "405d8f2eb75f911de58f5a57331a48ee6800a0c4065abe6c647fc8c7f0c25b87" +content-hash = "a0358de807bcf4fe1af43ac28f40f41552d559b11cfc5ebd099a4e3842f87a8d" [metadata.files] appdirs = [ @@ -1086,12 +1081,12 @@ black = [ {file = "black-20.8b1.tar.gz", hash = "sha256:1c02557aa099101b9d21496f8a914e9ed2222ef70336404eeeac8edba836fbea"}, ] boto3 = [ - {file = "boto3-1.18.32-py3-none-any.whl", hash = "sha256:a299d0c6b5a30dc2e823944286ec782aec415d83965a51f97fc9a779a04ff194"}, - {file = "boto3-1.18.32.tar.gz", hash = "sha256:f4b17a2b6e04e5ec6f494e643d05b06dd60c88943f33d6f9650dd9e7f89a7022"}, + {file = "boto3-1.18.54-py3-none-any.whl", hash = "sha256:f22a77098cd70ddf848df6981ec57b92178e9d8eb74637edbdf4173bfa9279fa"}, + {file = "boto3-1.18.54.tar.gz", hash = "sha256:2d81dc484020059fc738165984304107d4db1c6774b6310d08c892a1751f6980"}, ] botocore = [ - {file = "botocore-1.21.32-py3-none-any.whl", hash = "sha256:5803bf852304a301de41dccc3c0431053354144f3aefc7571dbe240a4288d3c5"}, - {file = "botocore-1.21.32.tar.gz", hash = "sha256:95ff61534b2a423d0e70067c39615e4e70c119773d2180d7254bf4025c54396d"}, + {file = "botocore-1.21.54-py3-none-any.whl", hash = "sha256:56b74a5186bec835baf580b2d062ea1738d3ff5a573653d41b3ad1598a5b77c4"}, + {file = "botocore-1.21.54.tar.gz", hash = "sha256:46127b3a385d0ec73d1994b8958b23b79e0613e12c486371a100df992b72a1b9"}, ] certifi = [ {file = "certifi-2020.12.5-py2.py3-none-any.whl", hash = "sha256:719a74fb9e33b9bd44cc7f3a8d94bc35e4049deebe19ba7d8e108280cfd59830"}, @@ -1110,58 +1105,41 @@ colorama = [ {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, ] coverage = [ - {file = "coverage-5.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:b6d534e4b2ab35c9f93f46229363e17f63c53ad01330df9f2d6bd1187e5eaacf"}, - {file = "coverage-5.5-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:b7895207b4c843c76a25ab8c1e866261bcfe27bfaa20c192de5190121770672b"}, - {file = "coverage-5.5-cp27-cp27m-manylinux1_x86_64.whl", hash = 
"sha256:c2723d347ab06e7ddad1a58b2a821218239249a9e4365eaff6649d31180c1669"}, - {file = "coverage-5.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:900fbf7759501bc7807fd6638c947d7a831fc9fdf742dc10f02956ff7220fa90"}, - {file = "coverage-5.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:004d1880bed2d97151facef49f08e255a20ceb6f9432df75f4eef018fdd5a78c"}, - {file = "coverage-5.5-cp27-cp27m-win32.whl", hash = "sha256:06191eb60f8d8a5bc046f3799f8a07a2d7aefb9504b0209aff0b47298333302a"}, - {file = "coverage-5.5-cp27-cp27m-win_amd64.whl", hash = "sha256:7501140f755b725495941b43347ba8a2777407fc7f250d4f5a7d2a1050ba8e82"}, - {file = "coverage-5.5-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:372da284cfd642d8e08ef606917846fa2ee350f64994bebfbd3afb0040436905"}, - {file = "coverage-5.5-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:8963a499849a1fc54b35b1c9f162f4108017b2e6db2c46c1bed93a72262ed083"}, - {file = "coverage-5.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:869a64f53488f40fa5b5b9dcb9e9b2962a66a87dab37790f3fcfb5144b996ef5"}, - {file = "coverage-5.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:4a7697d8cb0f27399b0e393c0b90f0f1e40c82023ea4d45d22bce7032a5d7b81"}, - {file = "coverage-5.5-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:8d0a0725ad7c1a0bcd8d1b437e191107d457e2ec1084b9f190630a4fb1af78e6"}, - {file = "coverage-5.5-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:51cb9476a3987c8967ebab3f0fe144819781fca264f57f89760037a2ea191cb0"}, - {file = "coverage-5.5-cp310-cp310-win_amd64.whl", hash = "sha256:c0891a6a97b09c1f3e073a890514d5012eb256845c451bd48f7968ef939bf4ae"}, - {file = "coverage-5.5-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:3487286bc29a5aa4b93a072e9592f22254291ce96a9fbc5251f566b6b7343cdb"}, - {file = "coverage-5.5-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:deee1077aae10d8fa88cb02c845cfba9b62c55e1183f52f6ae6a2df6a2187160"}, - {file = "coverage-5.5-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:f11642dddbb0253cc8853254301b51390ba0081750a8ac03f20ea8103f0c56b6"}, - {file = "coverage-5.5-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:6c90e11318f0d3c436a42409f2749ee1a115cd8b067d7f14c148f1ce5574d701"}, - {file = "coverage-5.5-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:30c77c1dc9f253283e34c27935fded5015f7d1abe83bc7821680ac444eaf7793"}, - {file = "coverage-5.5-cp35-cp35m-win32.whl", hash = "sha256:9a1ef3b66e38ef8618ce5fdc7bea3d9f45f3624e2a66295eea5e57966c85909e"}, - {file = "coverage-5.5-cp35-cp35m-win_amd64.whl", hash = "sha256:972c85d205b51e30e59525694670de6a8a89691186012535f9d7dbaa230e42c3"}, - {file = "coverage-5.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:af0e781009aaf59e25c5a678122391cb0f345ac0ec272c7961dc5455e1c40066"}, - {file = "coverage-5.5-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:74d881fc777ebb11c63736622b60cb9e4aee5cace591ce274fb69e582a12a61a"}, - {file = "coverage-5.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:92b017ce34b68a7d67bd6d117e6d443a9bf63a2ecf8567bb3d8c6c7bc5014465"}, - {file = "coverage-5.5-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:d636598c8305e1f90b439dbf4f66437de4a5e3c31fdf47ad29542478c8508bbb"}, - {file = "coverage-5.5-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:41179b8a845742d1eb60449bdb2992196e211341818565abded11cfa90efb821"}, - {file = "coverage-5.5-cp36-cp36m-win32.whl", hash = "sha256:040af6c32813fa3eae5305d53f18875bedd079960822ef8ec067a66dd8afcd45"}, - {file = "coverage-5.5-cp36-cp36m-win_amd64.whl", hash = 
"sha256:5fec2d43a2cc6965edc0bb9e83e1e4b557f76f843a77a2496cbe719583ce8184"}, - {file = "coverage-5.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:18ba8bbede96a2c3dde7b868de9dcbd55670690af0988713f0603f037848418a"}, - {file = "coverage-5.5-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:2910f4d36a6a9b4214bb7038d537f015346f413a975d57ca6b43bf23d6563b53"}, - {file = "coverage-5.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:f0b278ce10936db1a37e6954e15a3730bea96a0997c26d7fee88e6c396c2086d"}, - {file = "coverage-5.5-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:796c9c3c79747146ebd278dbe1e5c5c05dd6b10cc3bcb8389dfdf844f3ead638"}, - {file = "coverage-5.5-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:53194af30d5bad77fcba80e23a1441c71abfb3e01192034f8246e0d8f99528f3"}, - {file = "coverage-5.5-cp37-cp37m-win32.whl", hash = "sha256:184a47bbe0aa6400ed2d41d8e9ed868b8205046518c52464fde713ea06e3a74a"}, - {file = "coverage-5.5-cp37-cp37m-win_amd64.whl", hash = "sha256:2949cad1c5208b8298d5686d5a85b66aae46d73eec2c3e08c817dd3513e5848a"}, - {file = "coverage-5.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:217658ec7187497e3f3ebd901afdca1af062b42cfe3e0dafea4cced3983739f6"}, - {file = "coverage-5.5-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1aa846f56c3d49205c952d8318e76ccc2ae23303351d9270ab220004c580cfe2"}, - {file = "coverage-5.5-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:24d4a7de75446be83244eabbff746d66b9240ae020ced65d060815fac3423759"}, - {file = "coverage-5.5-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:d1f8bf7b90ba55699b3a5e44930e93ff0189aa27186e96071fac7dd0d06a1873"}, - {file = "coverage-5.5-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:970284a88b99673ccb2e4e334cfb38a10aab7cd44f7457564d11898a74b62d0a"}, - {file = "coverage-5.5-cp38-cp38-win32.whl", hash = "sha256:01d84219b5cdbfc8122223b39a954820929497a1cb1422824bb86b07b74594b6"}, - {file = "coverage-5.5-cp38-cp38-win_amd64.whl", hash = "sha256:2e0d881ad471768bf6e6c2bf905d183543f10098e3b3640fc029509530091502"}, - {file = "coverage-5.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d1f9ce122f83b2305592c11d64f181b87153fc2c2bbd3bb4a3dde8303cfb1a6b"}, - {file = "coverage-5.5-cp39-cp39-manylinux1_i686.whl", hash = "sha256:13c4ee887eca0f4c5a247b75398d4114c37882658300e153113dafb1d76de529"}, - {file = "coverage-5.5-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:52596d3d0e8bdf3af43db3e9ba8dcdaac724ba7b5ca3f6358529d56f7a166f8b"}, - {file = "coverage-5.5-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:2cafbbb3af0733db200c9b5f798d18953b1a304d3f86a938367de1567f4b5bff"}, - {file = "coverage-5.5-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:44d654437b8ddd9eee7d1eaee28b7219bec228520ff809af170488fd2fed3e2b"}, - {file = "coverage-5.5-cp39-cp39-win32.whl", hash = "sha256:d314ed732c25d29775e84a960c3c60808b682c08d86602ec2c3008e1202e3bb6"}, - {file = "coverage-5.5-cp39-cp39-win_amd64.whl", hash = "sha256:13034c4409db851670bc9acd836243aeee299949bd5673e11844befcb0149f03"}, - {file = "coverage-5.5-pp36-none-any.whl", hash = "sha256:f030f8873312a16414c0d8e1a1ddff2d3235655a2174e3648b4fa66b3f2f1079"}, - {file = "coverage-5.5-pp37-none-any.whl", hash = "sha256:2a3859cb82dcbda1cfd3e6f71c27081d18aa251d20a17d87d26d4cd216fb0af4"}, - {file = "coverage-5.5.tar.gz", hash = "sha256:ebe78fe9a0e874362175b02371bdfbee64d8edc42a044253ddf4ee7d3c15212c"}, + {file = "coverage-6.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:3dfb23cc180b674a11a559183dff9655beb9da03088f3fe3c4f3a6d200c86f05"}, + {file = 
"coverage-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5dd5ae0a9cd55d71f1335c331e9625382239b8cede818fb62d8d2702336dbf8"}, + {file = "coverage-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8426fec5ad5a6e8217921716b504e9b6e1166dc147e8443b4855e329db686282"}, + {file = "coverage-6.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:aa5d4d43fa18cc9d0c6e02a83de0b9729b5451a9066574bd276481474f0a53ab"}, + {file = "coverage-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b78dd3eeb8f5ff26d2113c41836bac04a9ea91be54c346826b54a373133c8c53"}, + {file = "coverage-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:581fddd2f883379bd5af51da9233e0396b6519f3d3eeae4fb88867473be6d56e"}, + {file = "coverage-6.0-cp310-cp310-win32.whl", hash = "sha256:43bada49697a62ffa0283c7f01bbc76aac562c37d4bb6c45d56dd008d841194e"}, + {file = "coverage-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:fa816e97cfe1f691423078dffa39a18106c176f28008db017b3ce3e947c34aa5"}, + {file = "coverage-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:5c191e01b23e760338f19d8ba2470c0dad44c8b45e41ac043b2db84efc62f695"}, + {file = "coverage-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:274a612f67f931307706b60700f1e4cf80e1d79dff6c282fc9301e4565e78724"}, + {file = "coverage-6.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9dbfcbc56d8de5580483cf2caff6a59c64d3e88836cbe5fb5c20c05c29a8808"}, + {file = "coverage-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e63490e8a6675cee7a71393ee074586f7eeaf0e9341afd006c5d6f7eec7c16d7"}, + {file = "coverage-6.0-cp36-cp36m-win32.whl", hash = "sha256:72f8c99f1527c5a8ee77c890ea810e26b39fd0b4c2dffc062e20a05b2cca60ef"}, + {file = "coverage-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:88f1810eb942e7063d051d87aaaa113eb5fd5a7fd2cda03a972de57695b8bb1a"}, + {file = "coverage-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:befb5ffa9faabef6dadc42622c73de168001425258f0b7e402a2934574e7a04b"}, + {file = "coverage-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7dbda34e8e26bd86606ba8a9c13ccb114802e01758a3d0a75652ffc59a573220"}, + {file = "coverage-6.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:b4ee5815c776dfa3958ba71c7cd4cdd8eb40d79358a18352feb19562fe4408c4"}, + {file = "coverage-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d82cbef1220703ce56822be7fbddb40736fc1a928ac893472df8aff7421ae0aa"}, + {file = "coverage-6.0-cp37-cp37m-win32.whl", hash = "sha256:d795a2c92fe8cb31f6e9cd627ee4f39b64eb66bf47d89d8fcf7cb3d17031c887"}, + {file = "coverage-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:6e216e4021c934246c308fd3e0d739d9fa8a3f4ea414f584ab90ef9c1592f282"}, + {file = "coverage-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8305e14112efb74d0b5fec4df6e41cafde615c2392a7e51c84013cafe945842c"}, + {file = "coverage-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4865dc4a7a566147cbdc2b2f033a6cccc99a7dcc89995137765c384f6c73110b"}, + {file = "coverage-6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:25df2bc53a954ba2ccf230fa274d1de341f6aa633d857d75e5731365f7181749"}, + {file = "coverage-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:08fd55d2e00dac4c18a2fa26281076035ec86e764acdc198b9185ce749ada58f"}, + {file = "coverage-6.0-cp38-cp38-win32.whl", hash = "sha256:11ce082eb0f7c2bbfe96f6c8bcc3a339daac57de4dc0f3186069ec5c58da911c"}, + {file = "coverage-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:7844a8c6a0fee401edbf578713c2473e020759267c40261b294036f9d3eb6a2d"}, + {file = "coverage-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bea681309bdd88dd1283a8ba834632c43da376d9bce05820826090aad80c0126"}, + {file = "coverage-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e735ab8547d8a1fe8e58dd765d6f27ac539b395f52160d767b7189f379f9be7a"}, + {file = "coverage-6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:7593a49300489d064ebb6c58539f52cbbc4a2e6a4385de5e92cae1563f88a425"}, + {file = "coverage-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:adb0f4c3c8ba8104378518a1954cbf3d891a22c13fd0e0bf135391835f44f288"}, + {file = "coverage-6.0-cp39-cp39-win32.whl", hash = "sha256:8da0c4a26a831b392deaba5fdd0cd7838d173b47ce2ec3d0f37be630cb09ef6e"}, + {file = "coverage-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:7af2f8e7bb54ace984de790e897f858e88068d8fbc46c9490b7c19c59cf51822"}, + {file = "coverage-6.0-pp36-none-any.whl", hash = "sha256:82b58d37c47d93a171be9b5744bcc96a0012cbf53d5622b29a49e6be2097edd7"}, + {file = "coverage-6.0-pp37-none-any.whl", hash = "sha256:fff04bfefb879edcf616f1ce5ea6f4a693b5976bdc5e163f8464f349c25b59f0"}, + {file = "coverage-6.0.tar.gz", hash = "sha256:17983f6ccc47f4864fd16d20ff677782b23d1207bf222d10e4d676e4636b0872"}, ] dataclasses = [ {file = "dataclasses-0.8-py3-none-any.whl", hash = "sha256:0201d89fa866f68c8ebd9d08ee6ff50c0b255f8ec63a71c16fda7af82bb887bf"}, @@ -1191,8 +1169,8 @@ flake8-black = [ {file = "flake8_black-0.2.3-py3-none-any.whl", hash = "sha256:cc080ba5b3773b69ba102b6617a00cc4ecbad8914109690cfda4d565ea435d96"}, ] flake8-bugbear = [ - {file = "flake8-bugbear-21.4.3.tar.gz", hash = "sha256:2346c81f889955b39e4a368eb7d508de723d9de05716c287dc860a4073dc57e7"}, - {file = "flake8_bugbear-21.4.3-py36.py37.py38-none-any.whl", hash = "sha256:4f305dca96be62bf732a218fe6f1825472a621d3452c5b994d8f89dae21dbafa"}, + {file = "flake8-bugbear-21.9.2.tar.gz", hash = "sha256:db9a09893a6c649a197f5350755100bb1dd84f110e60cf532fdfa07e41808ab2"}, + {file = "flake8_bugbear-21.9.2-py36.py37.py38-none-any.whl", hash = "sha256:4f7eaa6f05b7d7ea4cbbde93f7bcdc5438e79320fa1ec420d860c181af38b769"}, ] flake8-builtins = [ {file = "flake8-builtins-1.5.3.tar.gz", hash = "sha256:09998853b2405e98e61d2ff3027c47033adbdc17f9fe44ca58443d876eb00f3b"}, @@ -1218,10 +1196,6 @@ flake8-isort = [ {file = "flake8-isort-4.0.0.tar.gz", hash = "sha256:2b91300f4f1926b396c2c90185844eb1a3d5ec39ea6138832d119da0a208f4d9"}, {file = "flake8_isort-4.0.0-py2.py3-none-any.whl", hash = "sha256:729cd6ef9ba3659512dee337687c05d79c78e1215fdf921ed67e5fe46cce2f3c"}, ] -flake8-polyfill = [ - {file = "flake8-polyfill-1.0.2.tar.gz", hash = "sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda"}, - {file = "flake8_polyfill-1.0.2-py2.py3-none-any.whl", hash = "sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9"}, -] flake8-variables-names = [ {file = 
"flake8_variables_names-0.0.4.tar.gz", hash = "sha256:d6fa0571a807c72940b5773827c5760421ea6f8206595ff0a8ecfa01e42bf2cf"}, ] @@ -1230,6 +1204,7 @@ future = [ ] ghp-import = [ {file = "ghp-import-2.0.1.tar.gz", hash = "sha256:753de2eace6e0f7d4edfb3cce5e3c3b98cd52aadb80163303d1d036bda7b4483"}, + {file = "ghp_import-2.0.1-py3-none-any.whl", hash = "sha256:8241a8e9f8dd3c1fafe9696e6e081b57a208ef907e9939c44e7415e407ab40ea"}, ] gitdb = [ {file = "gitdb-4.0.7-py3-none-any.whl", hash = "sha256:6c4cc71933456991da20917998acbe6cf4fb41eeaab7d6d67fbc05ecd4c865b0"}, @@ -1352,8 +1327,8 @@ mkdocs-git-revision-date-plugin = [ {file = "mkdocs_git_revision_date_plugin-0.3.1-py3-none-any.whl", hash = "sha256:8ae50b45eb75d07b150a69726041860801615aae5f4adbd6b1cf4d51abaa03d5"}, ] mkdocs-material = [ - {file = "mkdocs-material-7.2.6.tar.gz", hash = "sha256:4bdeff63904680865676ceb3193216934de0b33fa5b2446e0a84ade60929ee54"}, - {file = "mkdocs_material-7.2.6-py2.py3-none-any.whl", hash = "sha256:4c6939b9d7d5c6db948ab02df8525c64211828ddf33286acea8b9d2115cec369"}, + {file = "mkdocs-material-7.3.1.tar.gz", hash = "sha256:d1ab269da2025f22b8fba079d7eadc05cd97ac2a21d87b09d414e69915f247a7"}, + {file = "mkdocs_material-7.3.1-py2.py3-none-any.whl", hash = "sha256:8d59c8ac241d59eef1a883c49ca685c8d8446eb054675a212fb748daff24099c"}, ] mkdocs-material-extensions = [ {file = "mkdocs-material-extensions-1.0.1.tar.gz", hash = "sha256:6947fb7f5e4291e3c61405bad3539d81e0b3cd62ae0d66ced018128af509c68f"}, @@ -1448,8 +1423,8 @@ pygments = [ {file = "Pygments-2.9.0.tar.gz", hash = "sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f"}, ] pymdown-extensions = [ - {file = "pymdown-extensions-8.2.tar.gz", hash = "sha256:b6daa94aad9e1310f9c64c8b1f01e4ce82937ab7eb53bfc92876a97aca02a6f4"}, - {file = "pymdown_extensions-8.2-py3-none-any.whl", hash = "sha256:141452d8ed61165518f2c923454bf054866b85cf466feedb0eb68f04acdc2560"}, + {file = "pymdown-extensions-9.0.tar.gz", hash = "sha256:01e4bec7f4b16beaba0087a74496401cf11afd69e3a11fe95cb593e5c698ef40"}, + {file = "pymdown_extensions-9.0-py3-none-any.whl", hash = "sha256:430cc2fbb30cef2df70edac0b4f62614a6a4d2b06462e32da4ca96098b7c1dfb"}, ] pyparsing = [ {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, @@ -1464,8 +1439,8 @@ pytest-asyncio = [ {file = "pytest_asyncio-0.15.1-py3-none-any.whl", hash = "sha256:3042bcdf1c5d978f6b74d96a151c4cfb9dcece65006198389ccd7e6c60eb1eea"}, ] pytest-cov = [ - {file = "pytest-cov-2.12.1.tar.gz", hash = "sha256:261ceeb8c227b726249b376b8526b600f38667ee314f910353fa318caa01f4d7"}, - {file = "pytest_cov-2.12.1-py2.py3-none-any.whl", hash = "sha256:261bb9e47e65bd099c89c3edf92972865210c36813f80ede5277dceb77a4a62a"}, + {file = "pytest-cov-3.0.0.tar.gz", hash = "sha256:e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470"}, + {file = "pytest_cov-3.0.0-py3-none-any.whl", hash = "sha256:578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6"}, ] pytest-mock = [ {file = "pytest-mock-3.6.1.tar.gz", hash = "sha256:40217a058c52a63f1042f0784f62009e976ba824c418cced42e88d5f40ab0e62"}, @@ -1511,8 +1486,8 @@ pyyaml-env-tag = [ {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, ] radon = [ - {file = "radon-4.5.2-py2.py3-none-any.whl", hash = "sha256:0fc191bfb6938e67f881764f7242c163fb3c78fc7acdfc5a0b8254c66ff9dc8b"}, - {file = "radon-4.5.2.tar.gz", hash = 
"sha256:63b863dd294fcc86f6aecace8d7cb4228acc2a16ab0b89c11ff60cb14182b488"}, + {file = "radon-5.1.0-py2.py3-none-any.whl", hash = "sha256:fa74e018197f1fcb54578af0f675d8b8e2342bd8e0b72bef8197bc4c9e645f36"}, + {file = "radon-5.1.0.tar.gz", hash = "sha256:cb1d8752e5f862fb9e20d82b5f758cbc4fb1237c92c9a66450ea0ea7bf29aeee"}, ] regex = [ {file = "regex-2021.4.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:619d71c59a78b84d7f18891fe914446d07edd48dc8328c8e149cbe0929b4e000"}, @@ -1622,6 +1597,10 @@ toml = [ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] +tomli = [ + {file = "tomli-1.2.1-py3-none-any.whl", hash = "sha256:8dd0e9524d6f386271a36b41dbf6c57d8e32fd96fd22b6584679dc569d20899f"}, + {file = "tomli-1.2.1.tar.gz", hash = "sha256:a5b75cb6f3968abb47af1b40c1819dc519ea82bcc065776a866e8d74c5ca9442"}, +] typed-ast = [ {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6"}, {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075"}, @@ -1690,8 +1669,8 @@ wrapt = [ {file = "wrapt-1.12.1.tar.gz", hash = "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7"}, ] xenon = [ - {file = "xenon-0.7.3-py2.py3-none-any.whl", hash = "sha256:a167b4c329fbea7cd84b148007ba92142f46b88ca095488c175dc7a8a8007ee9"}, - {file = "xenon-0.7.3.tar.gz", hash = "sha256:eda949fbf3cfb4851d49d97e961e2b18a6b66fbecaf285dc89230775d2b2a99f"}, + {file = "xenon-0.8.0-py2.py3-none-any.whl", hash = "sha256:4c3d7157d9ae058364e130c831702e4a65a1f729d4b4def912418ed09772c851"}, + {file = "xenon-0.8.0.tar.gz", hash = "sha256:cd5cad0930673d0e52609712c63fe4721a8f4c4342dc338bd7ea5fa0666b8515"}, ] zipp = [ {file = "zipp-3.4.1-py3-none-any.whl", hash = "sha256:51cb66cc54621609dd593d1787f286ee42a5c0adbb4b29abea5a63edc3e03098"}, diff --git a/pyproject.toml b/pyproject.toml index a54704d652e..abcff627870 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "aws_lambda_powertools" -version = "1.20.2" +version = "1.21.0" description = "A suite of utilities for AWS Lambda functions to ease adopting best practices such as tracing, structured logging, custom metrics, batching, idempotency, feature flags, and more." 
authors = ["Amazon Web Services"] include = ["aws_lambda_powertools/py.typed", "THIRD-PARTY-LICENSES"] @@ -29,7 +29,7 @@ pydantic = {version = "^1.8.2", optional = true } email-validator = {version = "*", optional = true } [tool.poetry.dev-dependencies] -coverage = {extras = ["toml"], version = "^5.5"} +coverage = {extras = ["toml"], version = "^6.0"} pytest = "^6.2.5" black = "^20.8b1" flake8 = "^3.9.0" @@ -41,16 +41,16 @@ flake8-fixme = "^1.1.1" flake8-isort = "^4.0.0" flake8-variables-names = "^0.0.4" isort = "^5.9.3" -pytest-cov = "^2.12.1" +pytest-cov = "^3.0.0" pytest-mock = "^3.5.1" pdoc3 = "^0.10.0" pytest-asyncio = "^0.15.1" bandit = "^1.7.0" -radon = "^4.5.0" -xenon = "^0.7.3" +radon = "^5.1.0" +xenon = "^0.8.0" flake8-eradicate = "^1.1.0" -flake8-bugbear = "^21.3.2" -mkdocs-material = "^7.2.6" +flake8-bugbear = "^21.9.2" +mkdocs-material = "^7.3.1" mkdocs-git-revision-date-plugin = "^0.3.1" mike = "^0.6.0" mypy = "^0.910" diff --git a/tests/functional/data_classes/test_api_gateway_authorizer.py b/tests/functional/data_classes/test_api_gateway_authorizer.py index 7dac6cb7791..b7584ccc4a8 100644 --- a/tests/functional/data_classes/test_api_gateway_authorizer.py +++ b/tests/functional/data_classes/test_api_gateway_authorizer.py @@ -1,6 +1,7 @@ import pytest from aws_lambda_powertools.utilities.data_classes.api_gateway_authorizer_event import ( + DENY_ALL_RESPONSE, APIGatewayAuthorizerResponse, HttpVerb, ) @@ -36,7 +37,8 @@ def test_authorizer_response_invalid_resource(builder: APIGatewayAuthorizerRespo def test_authorizer_response_allow_all_routes_with_context(): - builder = APIGatewayAuthorizerResponse("foo", "us-west-1", "123456789", "fantom", "dev", {"name": "Foo"}) + arn = "arn:aws:execute-api:us-west-1:123456789:fantom/dev/GET/foo" + builder = APIGatewayAuthorizerResponse.from_route_arn(arn, principal_id="foo", context={"name": "Foo"}) builder.allow_all_routes() assert builder.asdict() == { "principalId": "foo", @@ -54,6 +56,26 @@ def test_authorizer_response_allow_all_routes_with_context(): } +def test_authorizer_response_allow_all_routes_with_usage_identifier_key(): + arn = "arn:aws:execute-api:us-east-1:1111111111:api/dev/ANY/y" + builder = APIGatewayAuthorizerResponse.from_route_arn(arn, principal_id="cow", usage_identifier_key="key") + builder.allow_all_routes() + assert builder.asdict() == { + "principalId": "cow", + "policyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Action": "execute-api:Invoke", + "Effect": "Allow", + "Resource": ["arn:aws:execute-api:us-east-1:1111111111:api/dev/*/*"], + } + ], + }, + "usageIdentifierKey": "key", + } + + def test_authorizer_response_deny_all_routes(builder: APIGatewayAuthorizerResponse): builder.deny_all_routes() assert builder.asdict() == { @@ -145,3 +167,14 @@ def test_authorizer_response_deny_route_with_conditions(builder: APIGatewayAutho ], }, } + + +def test_deny_all(): + # CHECK we always explicitly deny all + statements = DENY_ALL_RESPONSE["policyDocument"]["Statement"] + assert len(statements) == 1 + assert statements[0] == { + "Action": "execute-api:Invoke", + "Effect": "Deny", + "Resource": ["*"], + } diff --git a/tests/functional/feature_flags/test_feature_flags.py b/tests/functional/feature_flags/test_feature_flags.py index 5342105da3d..8381dc6bf1d 100644 --- a/tests/functional/feature_flags/test_feature_flags.py +++ b/tests/functional/feature_flags/test_feature_flags.py @@ -233,9 +233,9 @@ def test_flags_conditions_no_rule_match_equal_multiple_conditions(mocker, config # check rule match for multiple 
of action types def test_flags_conditions_rule_match_multiple_actions_multiple_rules_multiple_conditions(mocker, config): expected_value_first_check = True - expected_value_second_check = False + expected_value_second_check = True expected_value_third_check = False - expected_value_fourth_case = False + expected_value_fourth_check = False mocked_app_config_schema = { "my_feature": { "default": expected_value_third_check, @@ -295,12 +295,14 @@ def test_flags_conditions_rule_match_multiple_actions_multiple_rules_multiple_co toggle = feature_flags.evaluate( name="my_fake_feature", context={"tenant_id": "11114446", "username": "ab"}, - default=expected_value_fourth_case, + default=expected_value_fourth_check, ) - assert toggle == expected_value_fourth_case + assert toggle == expected_value_fourth_check # check a case where the feature exists but the rule doesn't match so we revert to the default value of the feature + +# Check IN/NOT_IN/KEY_IN_VALUE/KEY_NOT_IN_VALUE/VALUE_IN_KEY/VALUE_NOT_IN_KEY conditions def test_flags_match_rule_with_in_action(mocker, config): expected_value = True mocked_app_config_schema = { @@ -397,6 +399,207 @@ def test_flags_no_match_rule_with_not_in_action(mocker, config): assert toggle == expected_value +def test_flags_match_rule_with_key_in_value_action(mocker, config): + expected_value = True + mocked_app_config_schema = { + "my_feature": { + "default": False, + "rules": { + "tenant id is contained in [6, 2]": { + "when_match": expected_value, + "conditions": [ + { + "action": RuleAction.KEY_IN_VALUE.value, + "key": "tenant_id", + "value": ["6", "2"], + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate(name="my_feature", context={"tenant_id": "6", "username": "a"}, default=False) + assert toggle == expected_value + + +def test_flags_no_match_rule_with_key_in_value_action(mocker, config): + expected_value = False + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "tenant id is contained in [8, 2]": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_IN_VALUE.value, + "key": "tenant_id", + "value": ["8", "2"], + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate(name="my_feature", context={"tenant_id": "6", "username": "a"}, default=False) + assert toggle == expected_value + + +def test_flags_match_rule_with_key_not_in_value_action(mocker, config): + expected_value = True + mocked_app_config_schema = { + "my_feature": { + "default": False, + "rules": { + "tenant id is contained in [8, 2]": { + "when_match": expected_value, + "conditions": [ + { + "action": RuleAction.KEY_NOT_IN_VALUE.value, + "key": "tenant_id", + "value": ["10", "4"], + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate(name="my_feature", context={"tenant_id": "6", "username": "a"}, default=False) + assert toggle == expected_value + + +def test_flags_no_match_rule_with_key_not_in_value_action(mocker, config): + expected_value = False + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "tenant id is contained in [8, 2]": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_NOT_IN_VALUE.value, + "key": "tenant_id", + "value": ["6", "4"], + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, 
mocked_app_config_schema, config) + toggle = feature_flags.evaluate(name="my_feature", context={"tenant_id": "6", "username": "a"}, default=False) + assert toggle == expected_value + + +def test_flags_match_rule_with_value_in_key_action(mocker, config): + expected_value = True + mocked_app_config_schema = { + "my_feature": { + "default": False, + "rules": { + "user is in the SYSADMIN group": { + "when_match": expected_value, + "conditions": [ + { + "action": RuleAction.VALUE_IN_KEY.value, + "key": "groups", + "value": "SYSADMIN", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", context={"tenant_id": "6", "username": "a", "groups": ["SYSADMIN", "IT"]}, default=False + ) + assert toggle == expected_value + + +def test_flags_no_match_rule_with_value_in_key_action(mocker, config): + expected_value = False + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "tenant id is contained in [8, 2]": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.VALUE_IN_KEY.value, + "key": "groups", + "value": "GUEST", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", context={"tenant_id": "6", "username": "a", "groups": ["SYSADMIN", "IT"]}, default=False + ) + assert toggle == expected_value + + +def test_flags_match_rule_with_value_not_in_key_action(mocker, config): + expected_value = True + mocked_app_config_schema = { + "my_feature": { + "default": False, + "rules": { + "user is in the GUEST group": { + "when_match": expected_value, + "conditions": [ + { + "action": RuleAction.VALUE_NOT_IN_KEY.value, + "key": "groups", + "value": "GUEST", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", context={"tenant_id": "6", "username": "a", "groups": ["SYSADMIN", "IT"]}, default=False + ) + assert toggle == expected_value + + +def test_flags_no_match_rule_with_value_not_in_key_action(mocker, config): + expected_value = False + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "user is in the SYSADMIN group": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.VALUE_NOT_IN_KEY.value, + "key": "groups", + "value": "SYSADMIN", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", context={"tenant_id": "6", "username": "a", "groups": ["SYSADMIN", "IT"]}, default=False + ) + assert toggle == expected_value + + +# Check multiple features def test_multiple_features_enabled(mocker, config): expected_value = ["my_feature", "my_feature2"] mocked_app_config_schema = { @@ -587,3 +790,410 @@ def test_get_feature_toggle_propagates_access_denied_error(mocker, config): # THEN raise StoreClientError error with pytest.raises(StoreClientError, match="AccessDeniedException") as err: feature_flags.evaluate(name="Foo", default=False) + + +def test_get_configuration_with_envelope_and_raw(mocker, config): + expected_value = True + mocked_app_config_schema = {"log_level": "INFO", "features": {"my_feature": {"default": expected_value}}} + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config, envelope="features") + + features_config = feature_flags.get_configuration() + 
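For orientation, the membership actions exercised in the tests above (KEY_IN_VALUE, KEY_NOT_IN_VALUE, VALUE_IN_KEY, VALUE_NOT_IN_KEY) are all evaluated against the caller-supplied context dict. A minimal sketch of evaluating such a flag outside the test harness follows; the AppConfig application, environment, and configuration names are placeholders and not part of this diff, and the tests above mock the fetched schema via init_feature_flags instead.

from aws_lambda_powertools.utilities.feature_flags import AppConfigStore, FeatureFlags

# Placeholder AppConfig coordinates; replace with real application/environment names.
store = AppConfigStore(environment="dev", application="product-catalogue", name="features")
feature_flags = FeatureFlags(store=store)

# The schema served by AppConfig would carry rules like the mocked ones above,
# e.g. VALUE_IN_KEY on "groups" with value "SYSADMIN".
ctx = {"tenant_id": "6", "username": "a", "groups": ["SYSADMIN", "IT"]}
is_enabled = feature_flags.evaluate(name="my_feature", context=ctx, default=False)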
config = feature_flags.store.get_raw_configuration + + assert "log_level" in config + assert "log_level" not in features_config + + +## +## Inequality test cases +## + +# Test not equals +def test_flags_not_equal_no_match(mocker, config): + expected_value = False + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "tenant id not equals 345345435": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.NOT_EQUALS.value, + "key": "tenant_id", + "value": "345345435", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", context={"tenant_id": "345345435", "username": "a"}, default=False + ) + assert toggle == expected_value + + +def test_flags_not_equal_match(mocker, config): + expected_value = True + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "tenant id not equals 345345435": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.NOT_EQUALS.value, + "key": "tenant_id", + "value": "345345435", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate(name="my_feature", context={"tenant_id": "", "username": "a"}, default=False) + assert toggle == expected_value + + +# Test less than +def test_flags_less_than_no_match_1(mocker, config): + expected_value = False + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "Date less than 2021.10.31": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_LESS_THAN_VALUE.value, + "key": "current_date", + "value": "2021.10.31", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", + context={"tenant_id": "345345435", "username": "a", "current_date": "2021.12.25"}, + default=False, + ) + assert toggle == expected_value + + +def test_flags_less_than_no_match_2(mocker, config): + expected_value = False + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "Date less than 2021.10.31": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_LESS_THAN_VALUE.value, + "key": "current_date", + "value": "2021.10.31", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", + context={"tenant_id": "345345435", "username": "a", "current_date": "2021.10.31"}, + default=False, + ) + assert toggle == expected_value + + +def test_flags_less_than_match(mocker, config): + expected_value = True + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "Date less than 2021.10.31": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_LESS_THAN_VALUE.value, + "key": "current_date", + "value": "2021.10.31", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", + context={"tenant_id": "345345435", "username": "a", "current_date": "2021.04.01"}, + default=False, + ) + assert toggle == expected_value + + +# Test less than or equal to +def test_flags_less_than_or_equal_no_match(mocker, config): + expected_value = False + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": 
{ + "Date less than or equal 2021.10.31": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_LESS_THAN_OR_EQUAL_VALUE.value, + "key": "current_date", + "value": "2021.10.31", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", + context={"tenant_id": "345345435", "username": "a", "current_date": "2021.12.25"}, + default=False, + ) + assert toggle == expected_value + + +def test_flags_less_than_or_equal_match_1(mocker, config): + expected_value = True + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "Date less than or equal 2021.10.31": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_LESS_THAN_OR_EQUAL_VALUE.value, + "key": "current_date", + "value": "2021.10.31", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", + context={"tenant_id": "345345435", "username": "a", "current_date": "2021.04.01"}, + default=False, + ) + assert toggle == expected_value + + +def test_flags_less_than_or_equal_match_2(mocker, config): + expected_value = True + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "Date less than or equal 2021.10.31": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_LESS_THAN_OR_EQUAL_VALUE.value, + "key": "current_date", + "value": "2021.10.31", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", + context={"tenant_id": "345345435", "username": "a", "current_date": "2021.10.31"}, + default=False, + ) + assert toggle == expected_value + + +# Test greater than +def test_flags_greater_than_no_match_1(mocker, config): + expected_value = False + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "Date greater than 2021.10.31": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_GREATER_THAN_VALUE.value, + "key": "current_date", + "value": "2021.10.31", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", + context={"tenant_id": "345345435", "username": "a", "current_date": "2021.04.01"}, + default=False, + ) + assert toggle == expected_value + + +def test_flags_greater_than_no_match_2(mocker, config): + expected_value = False + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "Date greater than 2021.10.31": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_GREATER_THAN_VALUE.value, + "key": "current_date", + "value": "2021.10.31", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", + context={"tenant_id": "345345435", "username": "a", "current_date": "2021.10.31"}, + default=False, + ) + assert toggle == expected_value + + +def test_flags_greater_than_match(mocker, config): + expected_value = True + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "Date greater than 2021.10.31": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_GREATER_THAN_VALUE.value, + "key": "current_date", + "value": "2021.10.31", + } + ], 
+ } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", + context={"tenant_id": "345345435", "username": "a", "current_date": "2021.12.25"}, + default=False, + ) + assert toggle == expected_value + + +# Test greater than or equal to +def test_flags_greater_than_or_equal_no_match(mocker, config): + expected_value = False + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "Date greater than or equal 2021.10.31": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_GREATER_THAN_OR_EQUAL_VALUE.value, + "key": "current_date", + "value": "2021.10.31", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", + context={"tenant_id": "345345435", "username": "a", "current_date": "2021.04.01"}, + default=False, + ) + assert toggle == expected_value + + +def test_flags_greater_than_or_equal_match_1(mocker, config): + expected_value = True + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "Date greater than or equal 2021.10.31": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_GREATER_THAN_OR_EQUAL_VALUE.value, + "key": "current_date", + "value": "2021.10.31", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", + context={"tenant_id": "345345435", "username": "a", "current_date": "2021.12.25"}, + default=False, + ) + assert toggle == expected_value + + +def test_flags_greater_than_or_equal_match_2(mocker, config): + expected_value = True + mocked_app_config_schema = { + "my_feature": { + "default": expected_value, + "rules": { + "Date greater than or equal 2021.10.31": { + "when_match": True, + "conditions": [ + { + "action": RuleAction.KEY_GREATER_THAN_OR_EQUAL_VALUE.value, + "key": "current_date", + "value": "2021.10.31", + } + ], + } + }, + } + } + feature_flags = init_feature_flags(mocker, mocked_app_config_schema, config) + toggle = feature_flags.evaluate( + name="my_feature", + context={"tenant_id": "345345435", "username": "a", "current_date": "2021.10.31"}, + default=False, + ) + assert toggle == expected_value diff --git a/tests/functional/feature_flags/test_schema_validation.py b/tests/functional/feature_flags/test_schema_validation.py index ce85494afce..1cd14aa4287 100644 --- a/tests/functional/feature_flags/test_schema_validation.py +++ b/tests/functional/feature_flags/test_schema_validation.py @@ -220,6 +220,26 @@ def test_valid_condition_all_actions(): CONDITION_KEY: "username", CONDITION_VALUE: ["c"], }, + { + CONDITION_ACTION: RuleAction.KEY_IN_VALUE.value, + CONDITION_KEY: "username", + CONDITION_VALUE: ["a", "b"], + }, + { + CONDITION_ACTION: RuleAction.KEY_NOT_IN_VALUE.value, + CONDITION_KEY: "username", + CONDITION_VALUE: ["c"], + }, + { + CONDITION_ACTION: RuleAction.VALUE_IN_KEY.value, + CONDITION_KEY: "groups", + CONDITION_VALUE: "SYSADMIN", + }, + { + CONDITION_ACTION: RuleAction.VALUE_NOT_IN_KEY.value, + CONDITION_KEY: "groups", + CONDITION_VALUE: "GUEST", + }, ], } }, diff --git a/tests/functional/idempotency/conftest.py b/tests/functional/idempotency/conftest.py index 2c528cafc50..71b5978497c 100644 --- a/tests/functional/idempotency/conftest.py +++ b/tests/functional/idempotency/conftest.py @@ -11,10 +11,10 @@ from botocore.config import Config 
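The inequality actions added above (NOT_EQUALS, KEY_LESS_THAN_VALUE, KEY_LESS_THAN_OR_EQUAL_VALUE, KEY_GREATER_THAN_VALUE, KEY_GREATER_THAN_OR_EQUAL_VALUE) appear to compare the context value against the rule value directly, which is why the tests rely on zero-padded date strings such as "2021.10.31" ordering correctly. A sketch of the rule shape being exercised; the feature and rule names here are illustrative only.

from aws_lambda_powertools.utilities.feature_flags.schema import RuleAction

# Illustrative schema fragment: the flag turns on when the caller-supplied
# "current_date" is on or after 2021.10.31.
features = {
    "my_feature": {
        "default": False,
        "rules": {
            "on or after launch date": {
                "when_match": True,
                "conditions": [
                    {
                        "action": RuleAction.KEY_GREATER_THAN_OR_EQUAL_VALUE.value,
                        "key": "current_date",
                        "value": "2021.10.31",
                    }
                ],
            }
        },
    }
}

Evaluation then follows the same feature_flags.evaluate(name=..., context=..., default=...) call shown throughout the tests above.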
from jmespath import functions -from aws_lambda_powertools.shared.jmespath_utils import extract_data_from_envelope from aws_lambda_powertools.shared.json_encoder import Encoder from aws_lambda_powertools.utilities.idempotency import DynamoDBPersistenceLayer from aws_lambda_powertools.utilities.idempotency.idempotency import IdempotencyConfig +from aws_lambda_powertools.utilities.jmespath_utils import extract_data_from_envelope from aws_lambda_powertools.utilities.validation import envelopes from tests.functional.utils import load_event @@ -122,7 +122,8 @@ def expected_params_update_item_with_validation( @pytest.fixture def expected_params_put_item(hashed_idempotency_key): return { - "ConditionExpression": "attribute_not_exists(id) OR expiration < :now", + "ConditionExpression": "attribute_not_exists(#id) OR #now < :now", + "ExpressionAttributeNames": {"#id": "id", "#now": "expiration"}, "ExpressionAttributeValues": {":now": stub.ANY}, "Item": {"expiration": stub.ANY, "id": hashed_idempotency_key, "status": "INPROGRESS"}, "TableName": "TEST_TABLE", @@ -132,7 +133,8 @@ def expected_params_put_item(hashed_idempotency_key): @pytest.fixture def expected_params_put_item_with_validation(hashed_idempotency_key, hashed_validation_key): return { - "ConditionExpression": "attribute_not_exists(id) OR expiration < :now", + "ConditionExpression": "attribute_not_exists(#id) OR #now < :now", + "ExpressionAttributeNames": {"#id": "id", "#now": "expiration"}, "ExpressionAttributeValues": {":now": stub.ANY}, "Item": { "expiration": stub.ANY, diff --git a/tests/functional/idempotency/test_idempotency.py b/tests/functional/idempotency/test_idempotency.py index cb0d43ae6fa..b1d0914d181 100644 --- a/tests/functional/idempotency/test_idempotency.py +++ b/tests/functional/idempotency/test_idempotency.py @@ -3,6 +3,7 @@ import json import sys from hashlib import md5 +from unittest.mock import MagicMock import jmespath import pytest @@ -994,3 +995,25 @@ def dummy(payload): # WHEN dummy(payload=data_two) + + +def test_idempotency_disabled_envvar(monkeypatch, lambda_context, persistence_store: DynamoDBPersistenceLayer): + # Scenario to validate no requests sent to dynamodb table when 'POWERTOOLS_IDEMPOTENCY_DISABLED' is set + mock_event = {"data": "value"} + + persistence_store.table = MagicMock() + + monkeypatch.setenv("POWERTOOLS_IDEMPOTENCY_DISABLED", "1") + + @idempotent_function(data_keyword_argument="data", persistence_store=persistence_store) + def dummy(data): + return {"message": "hello"} + + @idempotent(persistence_store=persistence_store) + def dummy_handler(event, context): + return {"message": "hi"} + + dummy(data=mock_event) + dummy_handler(mock_event, lambda_context) + + assert len(persistence_store.table.method_calls) == 0 diff --git a/tests/functional/validator/test_validator.py b/tests/functional/validator/test_validator.py index d8986ba90de..cd5c4168f56 100644 --- a/tests/functional/validator/test_validator.py +++ b/tests/functional/validator/test_validator.py @@ -1,3 +1,5 @@ +import re + import jmespath import pytest from jmespath import functions @@ -22,8 +24,22 @@ def test_validate_base64_string_envelope(schema, wrapped_event_base64_json_strin def test_validate_event_does_not_conform_with_schema(schema): - with pytest.raises(exceptions.SchemaValidationError): - validate(event={"message": "hello_world"}, schema=schema) + data = {"message": "hello_world"} + message = "data must contain ['message', 'username'] properties" + with pytest.raises( + exceptions.SchemaValidationError, + 
match=re.escape(f"Failed schema validation. Error: {message}, Path: ['data'], Data: {data}"), + ) as e: + validate(event=data, schema=schema) + + assert str(e.value) == e.value.message + assert e.value.validation_message == message + assert e.value.name == "data" + assert e.value.path is not None + assert e.value.value == data + assert e.value.definition == schema + assert e.value.rule == "required" + assert e.value.rule_definition == schema.get("required") def test_validate_json_string_no_envelope(schema, wrapped_event_json_string):