From 989638a42df0cd0ce4b8da211b4b8a95a50dbc84 Mon Sep 17 00:00:00 2001 From: Sergio Garcia <38561120+sergargar@users.noreply.github.com> Date: Wed, 23 Nov 2022 14:34:51 +0100 Subject: [PATCH] feat(RDS): Service and missing checks (#1513) --- lib/banner.py | 12 +- lib/check/check.py | 2 + lib/outputs/outputs.py | 15 +- providers/aws/services/acm/acm_service.py | 4 +- .../services/apigateway/apigateway_service.py | 2 +- .../apigatewayv2/apigatewayv2_service.py | 2 +- .../services/awslambda/awslambda_service.py | 17 +- .../cloudformation/cloudformation_service.py | 7 +- ...n_stacks_termination_protection_enabled.py | 2 +- .../services/cloudfront/cloudfront_service.py | 6 +- .../services/cloudwatch/cloudwatch_service.py | 6 +- .../services/codebuild/codebuild_service.py | 2 +- .../aws/services/dynamodb/dynamodb_service.py | 4 +- ...e_internet_facing_with_instance_profile.py | 2 +- .../ec2_instance_profile_attached/__init__.py | 0 ...c2_instance_profile_attached.metadata.json | 37 ++++ .../ec2_instance_profile_attached.py | 20 ++ .../ec2_instance_profile_attached_test.py | 122 +++++++++++ ...ustom_policy_permissive_role_assumption.py | 7 +- ...iam_policy_no_administrative_privileges.py | 6 +- providers/aws/services/rds/__init__.py | 0 providers/aws/services/rds/check_extra7113 | 58 ------ providers/aws/services/rds/check_extra7131 | 48 ----- providers/aws/services/rds/check_extra7132 | 47 ----- providers/aws/services/rds/check_extra7133 | 47 ----- providers/aws/services/rds/check_extra723 | 62 ------ providers/aws/services/rds/check_extra735 | 47 ----- providers/aws/services/rds/check_extra739 | 47 ----- providers/aws/services/rds/check_extra747 | 47 ----- providers/aws/services/rds/check_extra78 | 46 ----- providers/aws/services/rds/rds_client.py | 4 + .../rds_instance_backup_enabled/__init__.py | 0 .../rds_instance_backup_enabled.metadata.json | 35 ++++ .../rds_instance_backup_enabled.py | 23 +++ .../rds_instance_backup_enabled_test.py | 103 ++++++++++ .../__init__.py 
| 0 ...instance_deletion_protection.metadata.json | 35 ++++ .../rds_instance_deletion_protection.py | 25 +++ .../rds_instance_deletion_protection_test.py | 101 ++++++++++ .../__init__.py | 0 ..._enhanced_monitoring_enabled.metadata.json | 35 ++++ ...ds_instance_enhanced_monitoring_enabled.py | 23 +++ ...stance_enhanced_monitoring_enabled_test.py | 101 ++++++++++ .../__init__.py | 0 ..._integration_cloudwatch_logs.metadata.json | 35 ++++ ...ds_instance_integration_cloudwatch_logs.py | 21 ++ ...stance_integration_cloudwatch_logs_test.py | 101 ++++++++++ .../__init__.py | 0 ...inor_version_upgrade_enabled.metadata.json | 35 ++++ ..._instance_minor_version_upgrade_enabled.py | 23 +++ ...ance_minor_version_upgrade_enabled_test.py | 101 ++++++++++ .../rds/rds_instance_multi_az/__init__.py | 0 .../rds_instance_multi_az.metadata.json | 35 ++++ .../rds_instance_multi_az.py | 25 +++ .../rds_instance_multi_az_test.py | 101 ++++++++++ .../rds_instance_no_public_access/__init__.py | 0 ...ds_instance_no_public_access.metadata.json | 35 ++++ .../rds_instance_no_public_access.py | 25 +++ .../rds_instance_no_public_access_test.py | 101 ++++++++++ .../__init__.py | 0 ...s_instance_storage_encrypted.metadata.json | 35 ++++ .../rds_instance_storage_encrypted.py | 23 +++ .../rds_instance_storage_encrypted_test.py | 101 ++++++++++ providers/aws/services/rds/rds_service.py | 176 ++++++++++++++++ .../aws/services/rds/rds_service_test.py | 150 ++++++++++++++ .../rds_snapshots_public_access/__init__.py | 0 .../rds_snapshots_public_access.metadata.json | 35 ++++ .../rds_snapshots_public_access.py | 40 ++++ .../rds_snapshots_public_access_test.py | 190 ++++++++++++++++++ .../__init__.py | 0 ...t_level_public_access_blocks.metadata.json | 37 ++++ .../s3_account_level_public_access_blocks.py | 24 +++ ...account_level_public_access_blocks_test.py | 125 ++++++++++++ .../s3_bucket_public_access.py | 3 +- prowler | 1 + 75 files changed, 2293 insertions(+), 494 deletions(-) create mode 100644 
providers/aws/services/ec2/ec2_instance_profile_attached/__init__.py create mode 100644 providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.metadata.json create mode 100644 providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.py create mode 100644 providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached_test.py create mode 100644 providers/aws/services/rds/__init__.py delete mode 100644 providers/aws/services/rds/check_extra7113 delete mode 100644 providers/aws/services/rds/check_extra7131 delete mode 100644 providers/aws/services/rds/check_extra7132 delete mode 100644 providers/aws/services/rds/check_extra7133 delete mode 100644 providers/aws/services/rds/check_extra723 delete mode 100644 providers/aws/services/rds/check_extra735 delete mode 100644 providers/aws/services/rds/check_extra739 delete mode 100644 providers/aws/services/rds/check_extra747 delete mode 100644 providers/aws/services/rds/check_extra78 create mode 100644 providers/aws/services/rds/rds_client.py create mode 100644 providers/aws/services/rds/rds_instance_backup_enabled/__init__.py create mode 100644 providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.metadata.json create mode 100644 providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.py create mode 100644 providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled_test.py create mode 100644 providers/aws/services/rds/rds_instance_deletion_protection/__init__.py create mode 100644 providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.metadata.json create mode 100644 providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.py create mode 100644 providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection_test.py create mode 100644 
providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/__init__.py create mode 100644 providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.metadata.json create mode 100644 providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.py create mode 100644 providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled_test.py create mode 100644 providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/__init__.py create mode 100644 providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.metadata.json create mode 100644 providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.py create mode 100644 providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs_test.py create mode 100644 providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/__init__.py create mode 100644 providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.metadata.json create mode 100644 providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.py create mode 100644 providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled_test.py create mode 100644 providers/aws/services/rds/rds_instance_multi_az/__init__.py create mode 100644 providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.metadata.json create mode 100644 providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.py create mode 100644 providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az_test.py create mode 100644 providers/aws/services/rds/rds_instance_no_public_access/__init__.py create mode 
100644 providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.metadata.json create mode 100644 providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.py create mode 100644 providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access_test.py create mode 100644 providers/aws/services/rds/rds_instance_storage_encrypted/__init__.py create mode 100644 providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.metadata.json create mode 100644 providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.py create mode 100644 providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted_test.py create mode 100644 providers/aws/services/rds/rds_service.py create mode 100644 providers/aws/services/rds/rds_service_test.py create mode 100644 providers/aws/services/rds/rds_snapshots_public_access/__init__.py create mode 100644 providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.metadata.json create mode 100644 providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.py create mode 100644 providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access_test.py create mode 100644 providers/aws/services/s3/s3_account_level_public_access_blocks/__init__.py create mode 100644 providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks.metadata.json create mode 100644 providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks.py create mode 100644 providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks_test.py diff --git a/lib/banner.py b/lib/banner.py index 7e0e5dbb..78535d85 100644 --- a/lib/banner.py +++ b/lib/banner.py @@ -22,10 +22,10 @@ def print_banner(args): if args.verbose or args.quiet: print( f""" - 
Color code for results: - - {Fore.YELLOW}INFO (Information){Style.RESET_ALL} - - {Fore.GREEN}PASS (Recommended value){Style.RESET_ALL} - - {orange_color}WARNING (Ignored by allowlist){Style.RESET_ALL} - - {Fore.RED}FAIL (Fix required){Style.RESET_ALL} - """ +Color code for results: +- {Fore.YELLOW}INFO (Information){Style.RESET_ALL} +- {Fore.GREEN}PASS (Recommended value){Style.RESET_ALL} +- {orange_color}WARNING (Ignored by allowlist){Style.RESET_ALL} +- {Fore.RED}FAIL (Fix required){Style.RESET_ALL} + """ ) diff --git a/lib/check/check.py b/lib/check/check.py index 1a31c5ca..ab52d462 100644 --- a/lib/check/check.py +++ b/lib/check/check.py @@ -271,4 +271,6 @@ def execute_checks( logger.error( f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) + bar.title = f"-> {Fore.GREEN}Scan is completed!" + print(Style.RESET_ALL) return all_findings diff --git a/lib/outputs/outputs.py b/lib/outputs/outputs.py index d3f38b01..0a442d08 100644 --- a/lib/outputs/outputs.py +++ b/lib/outputs/outputs.py @@ -69,7 +69,6 @@ def report(check_findings, output_options, audit_info): print( f"\t{color}{finding.status}{Style.RESET_ALL} {finding.region}: {finding.status_extended}" ) - if file_descriptors: # sending the finding to input options if "csv" in file_descriptors: @@ -109,7 +108,9 @@ def report(check_findings, output_options, audit_info): color = set_report_color("INFO") if not output_options.is_quiet and output_options.verbose: print(f"\t{color}INFO{Style.RESET_ALL} There are no resources") - + # Separator between findings and bar + if output_options.is_quiet or output_options.verbose: + print() if file_descriptors: # Close all file descriptors for file_descriptor in file_descriptors: @@ -263,10 +264,11 @@ def close_json(output_filename, output_directory, mode): filename, "a", ) - # Replace last comma for square bracket - file_descriptor.seek(file_descriptor.tell() - 1, os.SEEK_SET) - file_descriptor.truncate() - file_descriptor.write("]") + # Replace last 
comma for square bracket if not empty + if file_descriptor.tell() > 0: + file_descriptor.seek(file_descriptor.tell() - 1, os.SEEK_SET) + file_descriptor.truncate() + file_descriptor.write("]") file_descriptor.close() except Exception as error: logger.critical(f"{error.__class__.__name__} -- {error}") @@ -352,7 +354,6 @@ def display_summary_table( current["Low"] += 1 # Add final service - add_service_to_table(findings_table, current) print("\nOverview Results:") diff --git a/providers/aws/services/acm/acm_service.py b/providers/aws/services/acm/acm_service.py index 5522c8af..6bb50193 100644 --- a/providers/aws/services/acm/acm_service.py +++ b/providers/aws/services/acm/acm_service.py @@ -47,7 +47,7 @@ class ACM: ) except Exception as error: logger.error( - f"{regional_client.region} -- {error.__class__.__name__}: {error}" + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) def __describe_certificates__(self): @@ -69,7 +69,7 @@ class ACM: certificate.transparency_logging = True except Exception as error: logger.error( - f"{regional_client.region} -- {error.__class__.__name__}: {error}" + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) diff --git a/providers/aws/services/apigateway/apigateway_service.py b/providers/aws/services/apigateway/apigateway_service.py index eb8c4586..6a4bf0b4 100644 --- a/providers/aws/services/apigateway/apigateway_service.py +++ b/providers/aws/services/apigateway/apigateway_service.py @@ -45,7 +45,7 @@ class APIGateway: ) except Exception as error: logger.error( - f"{regional_client.region} -- {error.__class__.__name__}: {error}" + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) def __get_authorizers__(self): diff --git a/providers/aws/services/apigatewayv2/apigatewayv2_service.py b/providers/aws/services/apigatewayv2/apigatewayv2_service.py index b39e9a3a..2441941d 100644 --- 
a/providers/aws/services/apigatewayv2/apigatewayv2_service.py +++ b/providers/aws/services/apigatewayv2/apigatewayv2_service.py @@ -44,7 +44,7 @@ class ApiGatewayV2: ) except Exception as error: logger.error( - f"{regional_client.region} -- {error.__class__.__name__}: {error}" + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) def __get_authorizers__(self): diff --git a/providers/aws/services/awslambda/awslambda_service.py b/providers/aws/services/awslambda/awslambda_service.py index 33933aca..dc94b0aa 100644 --- a/providers/aws/services/awslambda/awslambda_service.py +++ b/providers/aws/services/awslambda/awslambda_service.py @@ -1,14 +1,15 @@ +import io +import json import threading - -from pydantic import BaseModel +import zipfile +from enum import Enum from typing import Any + +import requests +from pydantic import BaseModel + from lib.logger import logger from providers.aws.aws_provider import generate_regional_clients -import requests -from enum import Enum -import io -import zipfile -import json ################## Lambda @@ -45,7 +46,6 @@ class Lambda: lambda_name = function["FunctionName"] lambda_arn = function["FunctionArn"] lambda_runtime = function["Runtime"] - lambda_environment = function["Environment"]["Variables"] self.functions[lambda_name] = Function( name=lambda_name, arn=lambda_arn, @@ -53,6 +53,7 @@ class Lambda: region=regional_client.region, ) if "Environment" in function: + lambda_environment = function["Environment"]["Variables"] self.functions[lambda_name].environment = lambda_environment except Exception as error: diff --git a/providers/aws/services/cloudformation/cloudformation_service.py b/providers/aws/services/cloudformation/cloudformation_service.py index 2584ac98..c9318bef 100644 --- a/providers/aws/services/cloudformation/cloudformation_service.py +++ b/providers/aws/services/cloudformation/cloudformation_service.py @@ -36,8 +36,11 @@ class CloudFormation: for page in 
describe_stacks_paginator.paginate(): for stack in page["Stacks"]: outputs = [] - for output in stack["Outputs"]: - outputs.append(f"{output['OutputKey']}:{output['OutputValue']}") + if "Outputs" in stack: + for output in stack["Outputs"]: + outputs.append( + f"{output['OutputKey']}:{output['OutputValue']}" + ) self.stacks.append( Stack( arn=stack["StackId"], diff --git a/providers/aws/services/cloudformation/cloudformation_stacks_termination_protection_enabled/cloudformation_stacks_termination_protection_enabled.py b/providers/aws/services/cloudformation/cloudformation_stacks_termination_protection_enabled/cloudformation_stacks_termination_protection_enabled.py index c393ce95..606d8277 100644 --- a/providers/aws/services/cloudformation/cloudformation_stacks_termination_protection_enabled/cloudformation_stacks_termination_protection_enabled.py +++ b/providers/aws/services/cloudformation/cloudformation_stacks_termination_protection_enabled/cloudformation_stacks_termination_protection_enabled.py @@ -23,6 +23,6 @@ class cloudformation_stacks_termination_protection_enabled(Check): else: report.status = "FAIL" report.status_extended = f"CloudFormation {stack.name} has termination protection disabled" - findings.append(report) + findings.append(report) return findings diff --git a/providers/aws/services/cloudfront/cloudfront_service.py b/providers/aws/services/cloudfront/cloudfront_service.py index a4e7ff65..77802fcd 100644 --- a/providers/aws/services/cloudfront/cloudfront_service.py +++ b/providers/aws/services/cloudfront/cloudfront_service.py @@ -73,13 +73,13 @@ class CloudFront: default_chache_config = DefaultCacheConfigBehaviour( realtime_log_config_arn=distribution_config["DistributionConfig"][ "DefaultCacheBehavior" - ]["RealtimeLogConfigArn"], + ].get("RealtimeLogConfigArn"), viewer_protocol_policy=distribution_config["DistributionConfig"][ "DefaultCacheBehavior" - ]["ViewerProtocolPolicy"], + ].get("ViewerProtocolPolicy"), 
field_level_encryption_id=distribution_config["DistributionConfig"][ "DefaultCacheBehavior" - ]["FieldLevelEncryptionId"], + ].get("FieldLevelEncryptionId"), ) distributions[ distribution_id diff --git a/providers/aws/services/cloudwatch/cloudwatch_service.py b/providers/aws/services/cloudwatch/cloudwatch_service.py index cc7fc31e..1e2e8988 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_service.py +++ b/providers/aws/services/cloudwatch/cloudwatch_service.py @@ -44,7 +44,7 @@ class CloudWatch: ) except Exception as error: logger.error( - f"{regional_client.region} -- {error.__class__.__name__}: {error}" + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) @@ -91,7 +91,7 @@ class Logs: ) except Exception as error: logger.error( - f"{regional_client.region} -- {error.__class__.__name__}: {error}" + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) def __describe_log_groups__(self, regional_client): @@ -119,7 +119,7 @@ class Logs: ) except Exception as error: logger.error( - f"{regional_client.region} -- {error.__class__.__name__}: {error}" + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) diff --git a/providers/aws/services/codebuild/codebuild_service.py b/providers/aws/services/codebuild/codebuild_service.py index 768120f9..027f5e6e 100644 --- a/providers/aws/services/codebuild/codebuild_service.py +++ b/providers/aws/services/codebuild/codebuild_service.py @@ -46,7 +46,7 @@ class Codebuild: except Exception as error: logger.error( - f"{regional_client.region} -- {error.__class__.__name__}: {error}" + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) def __list_builds_for_project__(self): diff --git a/providers/aws/services/dynamodb/dynamodb_service.py b/providers/aws/services/dynamodb/dynamodb_service.py index 7860d2ac..d12040ca 100644 --- 
a/providers/aws/services/dynamodb/dynamodb_service.py +++ b/providers/aws/services/dynamodb/dynamodb_service.py @@ -46,7 +46,7 @@ class DynamoDB: ) except Exception as error: logger.error( - f"{regional_client.region} -- {error.__class__.__name__}: {error}" + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) def __describe_table__(self): @@ -134,7 +134,7 @@ class DAX: ) except Exception as error: logger.error( - f"{regional_client.region} -- {error.__class__.__name__}: {error}" + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) diff --git a/providers/aws/services/ec2/ec2_instance_internet_facing_with_instance_profile/ec2_instance_internet_facing_with_instance_profile.py b/providers/aws/services/ec2/ec2_instance_internet_facing_with_instance_profile/ec2_instance_internet_facing_with_instance_profile.py index 7b5a38a7..e4a4a6bf 100644 --- a/providers/aws/services/ec2/ec2_instance_internet_facing_with_instance_profile/ec2_instance_internet_facing_with_instance_profile.py +++ b/providers/aws/services/ec2/ec2_instance_internet_facing_with_instance_profile/ec2_instance_internet_facing_with_instance_profile.py @@ -13,7 +13,7 @@ class ec2_instance_internet_facing_with_instance_profile(Check): report.status_extended = f"EC2 Instance {instance.id} is not internet facing with an instance profile." if instance.public_ip and instance.instance_profile: report.status = "FAIL" - report.status_extended = f"EC2 Instance {instance.id} at IP {instance.public_ip} is internet-facing with Instance Profile {instance.instance_profile}." + report.status_extended = f"EC2 Instance {instance.id} at IP {instance.public_ip} is internet-facing with Instance Profile {instance.instance_profile['Arn']}." 
findings.append(report) diff --git a/providers/aws/services/ec2/ec2_instance_profile_attached/__init__.py b/providers/aws/services/ec2/ec2_instance_profile_attached/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.metadata.json b/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.metadata.json new file mode 100644 index 00000000..b1def736 --- /dev/null +++ b/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.metadata.json @@ -0,0 +1,37 @@ +{ + "Provider": "aws", + "CheckID": "ec2_instance_profile_attached", + "CheckTitle": "Ensure IAM instance roles are used for AWS resource access from instances", + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], + "ServiceName": "ec2", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsEc2Instance", + "Description": "Ensure IAM instance roles are used for AWS resource access from instances.", + "Risk": "AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. AWS IAM roles reduce the risks associated with sharing and rotating credentials that can be used outside of AWS itself. 
If credentials are compromised, they can be used from outside of the AWS account.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "https://github.com/cloudmatos/matos/tree/master/remediations/aws/ec2/attach_iam_roles_ec2_instances", + "Terraform": "" + }, + "Recommendation": { + "Text": "Create an IAM instance role if necessary and attach it to the corresponding EC2 instance.", + "Url": "http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "", + "Compliance": [] +} diff --git a/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.py b/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.py new file mode 100644 index 00000000..fb3869ef --- /dev/null +++ b/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.py @@ -0,0 +1,20 @@ +from lib.check.models import Check, Check_Report +from providers.aws.services.ec2.ec2_client import ec2_client + + +class ec2_instance_profile_attached(Check): + def execute(self): + findings = [] + for instance in ec2_client.instances: + report = Check_Report(self.metadata) + report.region = instance.region + report.resource_id = instance.id + report.status = "FAIL" + report.status_extended = f"EC2 Instance {instance.id} not associated with an Instance Profile Role." + if instance.instance_profile: + report.status = "PASS" + report.status_extended = f"EC2 Instance {instance.id} associated with Instance Profile Role {instance.instance_profile['Arn']}." 
+ + findings.append(report) + + return findings diff --git a/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached_test.py b/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached_test.py new file mode 100644 index 00000000..2893e89b --- /dev/null +++ b/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached_test.py @@ -0,0 +1,122 @@ +from re import search +from unittest import mock + +from boto3 import client, resource +from moto import mock_ec2, mock_iam + +AWS_REGION = "us-east-1" +EXAMPLE_AMI_ID = "ami-12c6146b" + + +class Test_ec2_instance_profile_attached: + @mock_ec2 + def test_ec2_no_instances(self): + + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.ec2.ec2_service import EC2 + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.ec2.ec2_instance_profile_attached.ec2_instance_profile_attached.ec2_client", + new=EC2(current_audit_info), + ): + # Test Check + from providers.aws.services.ec2.ec2_instance_profile_attached.ec2_instance_profile_attached import ( + ec2_instance_profile_attached, + ) + + check = ec2_instance_profile_attached() + result = check.execute() + + assert len(result) == 0 + + @mock_iam + @mock_ec2 + def test_one_compliant_ec2(self): + iam = client("iam", "us-west-1") + profile_name = "fake_profile" + _ = iam.create_instance_profile( + InstanceProfileName=profile_name, + ) + ec2 = resource("ec2", region_name=AWS_REGION) + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/18") + instance = ec2.create_instances( + ImageId=EXAMPLE_AMI_ID, + MinCount=1, + MaxCount=1, + IamInstanceProfile={"Name": profile_name}, + NetworkInterfaces=[ + { + "DeviceIndex": 0, + "SubnetId": subnet.id, + "AssociatePublicIpAddress": False, + } + ], + )[0] + + from providers.aws.lib.audit_info.audit_info import 
current_audit_info + from providers.aws.services.ec2.ec2_service import EC2 + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.ec2.ec2_instance_profile_attached.ec2_instance_profile_attached.ec2_client", + new=EC2(current_audit_info), + ): + from providers.aws.services.ec2.ec2_instance_profile_attached.ec2_instance_profile_attached import ( + ec2_instance_profile_attached, + ) + + check = ec2_instance_profile_attached() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "associated with Instance Profile Role", + result[0].status_extended, + ) + assert result[0].resource_id == instance.id + + @mock_ec2 + def test_one_non_compliant_ec2(self): + ec2 = resource("ec2", region_name=AWS_REGION) + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/18") + instance = ec2.create_instances( + ImageId=EXAMPLE_AMI_ID, + MinCount=1, + MaxCount=1, + NetworkInterfaces=[ + { + "DeviceIndex": 0, + "SubnetId": subnet.id, + "AssociatePublicIpAddress": True, + } + ], + )[0] + + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.ec2.ec2_service import EC2 + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.ec2.ec2_instance_profile_attached.ec2_instance_profile_attached.ec2_client", + new=EC2(current_audit_info), + ): + from providers.aws.services.ec2.ec2_instance_profile_attached.ec2_instance_profile_attached import ( + ec2_instance_profile_attached, + ) + + check = ec2_instance_profile_attached() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "not associated with an Instance Profile", result[0].status_extended + ) + assert result[0].resource_id == instance.id diff --git 
a/providers/aws/services/iam/iam_no_custom_policy_permissive_role_assumption/iam_no_custom_policy_permissive_role_assumption.py b/providers/aws/services/iam/iam_no_custom_policy_permissive_role_assumption/iam_no_custom_policy_permissive_role_assumption.py index 82e936c4..80a77056 100644 --- a/providers/aws/services/iam/iam_no_custom_policy_permissive_role_assumption/iam_no_custom_policy_permissive_role_assumption.py +++ b/providers/aws/services/iam/iam_no_custom_policy_permissive_role_assumption/iam_no_custom_policy_permissive_role_assumption.py @@ -16,10 +16,11 @@ class iam_no_custom_policy_permissive_role_assumption(Check): if ( statement["Effect"] == "Allow" and ( - statement["Action"] == "sts:AssumeRole" - or statement["Action"] == "sts:*" + "sts:AssumeRole" in statement["Action"] + or "sts:*" in statement["Action"] + or "*" in statement["Action"] ) - and statement["Resource"] == "*" + and "*" in statement["Resource"] ): report.status = "FAIL" report.status_extended = f"Custom Policy {iam_client.policies[index]['PolicyName']} allows permissive STS Role assumption" diff --git a/providers/aws/services/iam/iam_policy_no_administrative_privileges/iam_policy_no_administrative_privileges.py b/providers/aws/services/iam/iam_policy_no_administrative_privileges/iam_policy_no_administrative_privileges.py index 128369e7..e30741d8 100644 --- a/providers/aws/services/iam/iam_policy_no_administrative_privileges/iam_policy_no_administrative_privileges.py +++ b/providers/aws/services/iam/iam_policy_no_administrative_privileges/iam_policy_no_administrative_privileges.py @@ -15,9 +15,9 @@ class iam_policy_no_administrative_privileges(Check): # Check the statements, if one includes *:* stop iterating over the rest for statement in policy_document["Statement"]: if ( - statement["Action"] == "*" - and statement["Effect"] == "Allow" - and statement["Resource"] == "*" + statement["Effect"] == "Allow" + and "*" in statement["Action"] + and "*" in statement["Resource"] ): report.status 
= "FAIL" report.status_extended = f"Policy {iam_client.policies[index]['PolicyName']} allows '*:*' administrative privileges" diff --git a/providers/aws/services/rds/__init__.py b/providers/aws/services/rds/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/rds/check_extra7113 b/providers/aws/services/rds/check_extra7113 deleted file mode 100644 index 89dac00e..00000000 --- a/providers/aws/services/rds/check_extra7113 +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env bash - -# Prowler - the handy cloud security tool (copyright 2018) by Toni de la Fuente -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. - -# Remediation: -# -# https://www.cloudconformity.com/knowledge-base/aws/RDS/instance-deletion-protection.html -# https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html -# -# aws rds modify-db-instance \ -# --region us-east-1 \ -# --db-instance-identifier test-db \ -# --deletion-protection \ -# [--apply-immediately | --no-apply-immediately] - -CHECK_ID_extra7113="7.113" -CHECK_TITLE_extra7113="[extra7113] Check if RDS instances have deletion protection enabled " -CHECK_SCORED_extra7113="NOT_SCORED" -CHECK_CIS_LEVEL_extra7113="EXTRA" -CHECK_SEVERITY_extra7113="Medium" -CHECK_ASFF_RESOURCE_TYPE_extra7113="AwsRdsDbInstance" -CHECK_ALTERNATE_check7113="extra7113" -CHECK_SERVICENAME_extra7113="rds" -CHECK_RISK_extra7113='You can only delete instances that do not have deletion protection enabled.' 
-CHECK_REMEDIATION_extra7113='Enable deletion protection using the AWS Management Console for production DB instances.' -CHECK_DOC_extra7113='https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html' -CHECK_CAF_EPIC_extra7113='Data Protection' - -extra7113(){ - for regx in $REGIONS; do - LIST_OF_RDS_INSTANCES=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --query "DBInstances[?Engine != 'docdb'].DBInstanceIdentifier" --output text 2>&1) - if [[ $(echo "$LIST_OF_RDS_INSTANCES" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError') ]]; then - textInfo "$regx: Access Denied trying to describe DB instances" "$regx" - continue - fi - if [[ $LIST_OF_RDS_INSTANCES ]];then - for rdsinstance in $LIST_OF_RDS_INSTANCES; do - IS_DELETIONPROTECTION=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --db-instance-identifier $rdsinstance --query 'DBInstances[*].DeletionProtection' --output text) - if [[ $IS_DELETIONPROTECTION == "False" ]]; then - textFail "$regx: RDS instance $rdsinstance deletion protection is not enabled!" "$regx" "$rdsinstance" - else - textPass "$regx: RDS instance $rdsinstance deletion protection is enabled" "$regx" "$rdsinstance" - fi - done - else - textInfo "$regx: No RDS instances found" "$regx" - fi - done -} diff --git a/providers/aws/services/rds/check_extra7131 b/providers/aws/services/rds/check_extra7131 deleted file mode 100644 index ac08d35c..00000000 --- a/providers/aws/services/rds/check_extra7131 +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash - -# Prowler - the handy cloud security tool (copyright 2018) by Toni de la Fuente -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. 
You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -CHECK_ID_extra7131="7.131" -CHECK_TITLE_extra7131="[extra7131] Ensure RDS instances have minor version upgrade enabled" -CHECK_SCORED_extra7131="NOT_SCORED" -CHECK_CIS_LEVEL_extra7131="EXTRA" -CHECK_SEVERITY_extra7131="Low" -CHECK_ASFF_RESOURCE_TYPE_extra7131="AwsRdsDbInstance" -CHECK_ALTERNATE_check7131="extra7131" -CHECK_SERVICENAME_extra7131="rds" -CHECK_RISK_extra7131='Auto Minor Version Upgrade is a feature that you can enable to have your database automatically upgraded when a new minor database engine version is available. Minor version upgrades often patch security vulnerabilities and fix bugs; and therefor should be applied.' -CHECK_REMEDIATION_extra7131='Enable auto minor version upgrade for all databases and environments.' 
-CHECK_DOC_extra7131='https://aws.amazon.com/blogs/database/best-practices-for-upgrading-amazon-rds-to-major-and-minor-versions-of-postgresql/' -CHECK_CAF_EPIC_extra7131='Infrastructure Security' - -extra7131(){ - for regx in $REGIONS; do - # LIST_OF_RDS_PUBLIC_INSTANCES=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --query 'DBInstances[?PubliclyAccessible==`true` && DBInstanceStatus==`"available"`].[DBInstanceIdentifier,Endpoint.Address]' --output text) - LIST_OF_RDS_INSTANCES=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --query 'DBInstances[*].[DBInstanceIdentifier,AutoMinorVersionUpgrade]' --output text 2>&1) - if [[ $(echo "$LIST_OF_RDS_INSTANCES" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError') ]]; then - textInfo "$regx: Access Denied trying to describe DB instances" "$regx" - continue - fi - if [[ $LIST_OF_RDS_INSTANCES ]];then - while read -r rds_instance;do - RDS_NAME=$(echo $rds_instance | awk '{ print $1; }') - RDS_AUTOMINORUPGRADE_FLAG=$(echo $rds_instance | awk '{ print $2; }') - if [[ $RDS_AUTOMINORUPGRADE_FLAG == "True" ]];then - textPass "$regx: RDS instance: $RDS_NAME is has minor version upgrade enabled" "$regx" "$RDS_NAME" - else - textFail "$regx: RDS instance: $RDS_NAME does not have minor version upgrade enabled" "$regx" "$RDS_NAME" - fi - done <<< "$LIST_OF_RDS_INSTANCES" - else - textInfo "$regx: no RDS instances found" "$regx" "$RDS_NAME" - fi - done -} diff --git a/providers/aws/services/rds/check_extra7132 b/providers/aws/services/rds/check_extra7132 deleted file mode 100644 index d3f0c9c4..00000000 --- a/providers/aws/services/rds/check_extra7132 +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -# Prowler - the handy cloud security tool (copyright 2018) by Toni de la Fuente -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. 
You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -CHECK_ID_extra7132="7.132" -CHECK_TITLE_extra7132="[extra7132] Check if RDS instances has enhanced monitoring enabled" -CHECK_SCORED_extra7132="NOT_SCORED" -CHECK_CIS_LEVEL_extra7132="EXTRA" -CHECK_SEVERITY_extra7132="Low" -CHECK_ASFF_RESOURCE_TYPE_extra7132="AwsRdsDbInstance" -CHECK_ALTERNATE_check7132="extra7132" -CHECK_SERVICENAME_extra7132="rds" -CHECK_RISK_extra7132='A smaller monitoring interval results in more frequent reporting of OS metrics.' -CHECK_REMEDIATION_extra7132='To use Enhanced Monitoring; you must create an IAM role; and then enable Enhanced Monitoring.' -CHECK_DOC_extra7132='https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html' -CHECK_CAF_EPIC_extra7132='Logging and Monitoring' - -extra7132(){ - for regx in $REGIONS; do - RDS_INSTANCES=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --query "DBInstances[?Engine != 'docdb'].DBInstanceIdentifier" --output text 2>&1) - if [[ $(echo "$RDS_INSTANCES" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError') ]]; then - textInfo "$regx: Access Denied trying to describe DB instances" "$regx" - continue - fi - if [[ $RDS_INSTANCES ]];then - for rdsinstance in ${RDS_INSTANCES}; do - RDS_NAME="$rdsinstance" - MONITORING_FLAG=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --db-instance-identifier $rdsinstance --query 'DBInstances[*].[EnhancedMonitoringResourceArn]' --output text) - if [[ $MONITORING_FLAG == "None" ]];then - textFail "$regx: RDS instance: $RDS_NAME has enhanced monitoring disabled!" 
"$rex" "$RDS_NAME" - else - textPass "$regx: RDS instance: $RDS_NAME has enhanced monitoring enabled." "$regx" "$RDS_NAME" - fi - done - else - textInfo "$regx: no RDS instances found" "$regx" "$RDS_NAME" - fi - done -} diff --git a/providers/aws/services/rds/check_extra7133 b/providers/aws/services/rds/check_extra7133 deleted file mode 100644 index d39c20b8..00000000 --- a/providers/aws/services/rds/check_extra7133 +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -# Prowler - the handy cloud security tool (copyright 2018) by Toni de la Fuente -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -CHECK_ID_extra7133="7.133" -CHECK_TITLE_extra7133="[extra7133] Check if RDS instances have multi-AZ enabled" -CHECK_SCORED_extra7133="NOT_SCORED" -CHECK_CIS_LEVEL_extra7133="EXTRA" -CHECK_SEVERITY_extra7133="Medium" -CHECK_ASFF_RESOURCE_TYPE_extra7133="AwsRdsDbInstance" -CHECK_ALTERNATE_check7133="extra7133" -CHECK_SERVICENAME_extra7133="rds" -CHECK_RISK_extra7133='In case of failure; with a single-AZ deployment configuration; should an availability zone specific database failure occur; Amazon RDS can not automatically fail over to the standby availability zone.' -CHECK_REMEDIATION_extra7133='Enable multi-AZ deployment for production databases.' 
-CHECK_DOC_extra7133='https://aws.amazon.com/rds/features/multi-az/' -CHECK_CAF_EPIC_extra7133='Data Protection' - -extra7133(){ - for regx in $REGIONS; do - RDS_INSTANCES=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --query "DBInstances[?Engine != 'docdb'].DBInstanceIdentifier" --output text 2>&1) - if [[ $(echo "$RDS_INSTANCES" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError') ]]; then - textInfo "$regx: Access Denied trying to describe DB instances" "$regx" - continue - fi - if [[ $RDS_INSTANCES ]];then - for rdsinstance in ${RDS_INSTANCES}; do - RDS_NAME="$rdsinstance" - MULTIAZ_FLAG=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --db-instance-identifier $rdsinstance --query 'DBInstances[*].MultiAZ' --output text) - if [[ $MULTIAZ_FLAG == "True" ]];then - textPass "$regx: RDS instance: $RDS_NAME has multi-AZ enabled" "$regx" "$RDS_NAME" - else - textFail "$regx: RDS instance: $RDS_NAME has multi-AZ disabled!" "$regx" "$RDS_NAME" - fi - done - else - textInfo "$regx: no RDS instances found" "$regx" "$RDS_NAME" - fi - done -} diff --git a/providers/aws/services/rds/check_extra723 b/providers/aws/services/rds/check_extra723 deleted file mode 100644 index 617b3bcb..00000000 --- a/providers/aws/services/rds/check_extra723 +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash - -# Prowler - the handy cloud security tool (copyright 2018) by Toni de la Fuente -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. 
-CHECK_ID_extra723="7.23" -CHECK_TITLE_extra723="[extra723] Check if RDS Snapshots and Cluster Snapshots are public" -CHECK_SCORED_extra723="NOT_SCORED" -CHECK_CIS_LEVEL_extra723="EXTRA" -CHECK_SEVERITY_extra723="Critical" -CHECK_ASFF_RESOURCE_TYPE_extra723="AwsRdsDbSnapshot" -CHECK_ALTERNATE_check723="extra723" -CHECK_SERVICENAME_extra723="rds" -CHECK_RISK_extra723='Publicly accessible services could expose sensitive data to bad actors. t is recommended that your RDS snapshots should not be public in order to prevent potential leak or misuse of sensitive data or any other kind of security threat. If your RDS snapshot is public; then the data which is backed up in that snapshot is accessible to all other AWS accounts.' -CHECK_REMEDIATION_extra723='Use AWS Config to identify any sanpshot that is public.' -CHECK_DOC_extra723='https://docs.aws.amazon.com/config/latest/developerguide/rds-snapshots-public-prohibited.html' -CHECK_CAF_EPIC_extra723='Data Protection' - -extra723(){ - # "Check if RDS Snapshots are public " - for regx in $REGIONS; do - # RDS snapshots - LIST_OF_RDS_SNAPSHOTS=$($AWSCLI rds describe-db-snapshots $PROFILE_OPT --region $regx --query DBSnapshots[*].DBSnapshotIdentifier --output text 2>&1) - if [[ $(echo "$LIST_OF_RDS_SNAPSHOTS" | grep -E 'AccessDenied|UnauthorizedOperation') ]]; then - textInfo "$regx: Access Denied trying to describe db snapshots" "$regx" - continue - fi - if [[ $LIST_OF_RDS_SNAPSHOTS ]]; then - for rdssnapshot in $LIST_OF_RDS_SNAPSHOTS;do - SNAPSHOT_IS_PUBLIC=$($AWSCLI rds describe-db-snapshot-attributes $PROFILE_OPT --region $regx --db-snapshot-identifier $rdssnapshot --query DBSnapshotAttributesResult.DBSnapshotAttributes[*] --output text|grep ^ATTRIBUTEVALUES|cut -f2|grep all) - if [[ $SNAPSHOT_IS_PUBLIC ]];then - textFail "$regx: RDS Snapshot $rdssnapshot is public!" 
"$regx" "$rdssnapshot" - else - textPass "$regx: RDS Snapshot $rdssnapshot is not shared" "$regx" "$rdssnapshot" - fi - done - else - textInfo "$regx: No RDS Snapshots found" "$regx" "$rdssnapshot" - fi - # RDS cluster snapshots - LIST_OF_RDS_CLUSTER_SNAPSHOTS=$($AWSCLI rds describe-db-cluster-snapshots $PROFILE_OPT --region $regx --query DBClusterSnapshots[*].DBClusterSnapshotIdentifier --output text) - if [[ $LIST_OF_RDS_CLUSTER_SNAPSHOTS ]]; then - for rdsclustersnapshot in $LIST_OF_RDS_CLUSTER_SNAPSHOTS;do - CLUSTER_SNAPSHOT_IS_PUBLIC=$($AWSCLI rds describe-db-cluster-snapshot-attributes $PROFILE_OPT --region $regx --db-cluster-snapshot-identifier $rdsclustersnapshot --query DBClusterSnapshotAttributesResult.DBClusterSnapshotAttributes[*] --output text|grep ^ATTRIBUTEVALUES|cut -f2|grep all) - if [[ $CLUSTER_SNAPSHOT_IS_PUBLIC ]];then - textFail "$regx: RDS Cluster Snapshot $rdsclustersnapshot is public!" "$regx" "$rdsclustersnapshot" - else - textPass "$regx: RDS Cluster Snapshot $rdsclustersnapshot is not shared" "$regx" "$rdsclustersnapshot" - fi - done - else - textInfo "$regx: No RDS Cluster Snapshots found" "$regx" "$rdsclustersnapshot" - fi - done -} diff --git a/providers/aws/services/rds/check_extra735 b/providers/aws/services/rds/check_extra735 deleted file mode 100644 index 0c79f388..00000000 --- a/providers/aws/services/rds/check_extra735 +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -# Prowler - the handy cloud security tool (copyright 2018) by Toni de la Fuente -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations under the License. -CHECK_ID_extra735="7.35" -CHECK_TITLE_extra735="[extra735] Check if RDS instances storage is encrypted" -CHECK_SCORED_extra735="NOT_SCORED" -CHECK_CIS_LEVEL_extra735="EXTRA" -CHECK_SEVERITY_extra735="Medium" -CHECK_ASFF_RESOURCE_TYPE_extra735="AwsRdsDbInstance" -CHECK_ALTERNATE_check735="extra735" -CHECK_ASFF_COMPLIANCE_TYPE_extra735="ens-mp.info.3.aws.rds.1" -CHECK_SERVICENAME_extra735="rds" -CHECK_RISK_extra735='If not enabled sensitive information at rest is not protected.' -CHECK_REMEDIATION_extra735='Enable Encryption. Use a CMK where possible. It will provide additional management and privacy benefits.' -CHECK_DOC_extra735='https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html' -CHECK_CAF_EPIC_extra735='Data Protection' - -extra735(){ - for regx in $REGIONS; do - LIST_OF_RDS_INSTANCES=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --query 'DBInstances[*].DBInstanceIdentifier' --output text 2>&1) - if [[ $(echo "$LIST_OF_RDS_INSTANCES" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError') ]]; then - textInfo "$regx: Access Denied trying to describe DB instances" "$regx" - continue - fi - if [[ $LIST_OF_RDS_INSTANCES ]];then - for rdsinstance in $LIST_OF_RDS_INSTANCES; do - IS_ENCRYPTED=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --db-instance-identifier $rdsinstance --query 'DBInstances[*].StorageEncrypted' --output text) - if [[ $IS_ENCRYPTED == "False" ]]; then - textFail "$regx: RDS instance $rdsinstance is not encrypted!" 
"$regx" "$rdsinstance" - else - textPass "$regx: RDS instance $rdsinstance is encrypted" "$regx" "$rdsinstance" - fi - done - else - textInfo "$regx: No RDS instances found" "$regx" "$rdsinstance" - fi - done -} diff --git a/providers/aws/services/rds/check_extra739 b/providers/aws/services/rds/check_extra739 deleted file mode 100644 index 162b1a17..00000000 --- a/providers/aws/services/rds/check_extra739 +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -# Prowler - the handy cloud security tool (copyright 2018) by Toni de la Fuente -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -CHECK_ID_extra739="7.39" -CHECK_TITLE_extra739="[extra739] Check if RDS instances have backup enabled" -CHECK_SCORED_extra739="NOT_SCORED" -CHECK_CIS_LEVEL_extra739="EXTRA" -CHECK_SEVERITY_extra739="Medium" -CHECK_ASFF_RESOURCE_TYPE_extra739="AwsRdsDbInstance" -CHECK_ALTERNATE_check739="extra739" -CHECK_SERVICENAME_extra739="rds" -CHECK_RISK_extra739='If backup is not enabled; data is vulnerable. Human error or bad actors could erase or modify data.' -CHECK_REMEDIATION_extra739='Enable automated backup for production data. Define a retention period and periodically test backup restoration. A Disaster Recovery process should be in place to govern Data Protection approach.' 
-CHECK_DOC_extra739='https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html' -CHECK_CAF_EPIC_extra739='Data Protection' - -extra739(){ - for regx in $REGIONS; do - LIST_OF_RDS_INSTANCES=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --query 'DBInstances[*].DBInstanceIdentifier' --output text 2>&1) - if [[ $(echo "$LIST_OF_RDS_INSTANCES" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError') ]]; then - textInfo "$regx: Access Denied trying to describe DB instances" "$regx" - continue - fi - if [[ $LIST_OF_RDS_INSTANCES ]];then - for rdsinstance in $LIST_OF_RDS_INSTANCES; do - # if retention is 0 then is disabled - BACKUP_RETENTION=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --db-instance-identifier $rdsinstance --query 'DBInstances[*].BackupRetentionPeriod' --output text) - if [[ $BACKUP_RETENTION == "0" ]]; then - textFail "$regx: RDS instance $rdsinstance has not backup enabled!" "$regx" "$rdsinstance" - else - textPass "$regx: RDS instance $rdsinstance has backup enabled with retention period $BACKUP_RETENTION days" "$regx" "$rdsinstance" - fi - done - else - textInfo "$regx: No RDS instances found" "$regx" "$rdsinstance" - fi - done -} diff --git a/providers/aws/services/rds/check_extra747 b/providers/aws/services/rds/check_extra747 deleted file mode 100644 index 4efd62ef..00000000 --- a/providers/aws/services/rds/check_extra747 +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -# Prowler - the handy cloud security tool (copyright 2018) by Toni de la Fuente -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. 
You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -CHECK_ID_extra747="7.47" -CHECK_TITLE_extra747="[extra747] Check if RDS instances is integrated with CloudWatch Logs" -CHECK_SCORED_extra747="NOT_SCORED" -CHECK_CIS_LEVEL_extra747="EXTRA" -CHECK_SEVERITY_extra747="Medium" -CHECK_ASFF_RESOURCE_TYPE_extra747="AwsRdsDbInstance" -CHECK_ALTERNATE_check747="extra747" -CHECK_SERVICENAME_extra747="rds" -CHECK_RISK_extra747='If logs are not enabled; monitoring of service use and threat analysis is not possible.' -CHECK_REMEDIATION_extra747='Use CloudWatch Logs to perform real-time analysis of the log data. Create alarms and view metrics.' -CHECK_DOC_extra747='https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/publishing_cloudwatchlogs.html' -CHECK_CAF_EPIC_extra747='Logging and Monitoring' - -extra747(){ - for regx in $REGIONS; do - LIST_OF_RDS_INSTANCES=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --query 'DBInstances[*].DBInstanceIdentifier' --output text 2>&1) - if [[ $(echo "$LIST_OF_RDS_INSTANCES" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError') ]]; then - textInfo "$regx: Access Denied trying to get rest APIs" "$regx" - continue - fi - if [[ $LIST_OF_RDS_INSTANCES ]];then - for rdsinstance in $LIST_OF_RDS_INSTANCES; do - # if retention is 0 then is disabled - ENABLED_CLOUDWATCHLOGS_EXPORTS=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --db-instance-identifier $rdsinstance --query 'DBInstances[*].EnabledCloudwatchLogsExports' --output text) - if [[ $ENABLED_CLOUDWATCHLOGS_EXPORTS ]]; then - textPass "$regx: RDS instance $rdsinstance is shipping 
$ENABLED_CLOUDWATCHLOGS_EXPORTS to CloudWatch Logs" "$regx" "$rdsinstance" - else - textFail "$regx: RDS instance $rdsinstance has no CloudWatch Logs enabled!" "$regx" "$rdsinstance" - fi - done - else - textInfo "$regx: No RDS instances found" "$regx" "$rdsinstance" - fi - done -} diff --git a/providers/aws/services/rds/check_extra78 b/providers/aws/services/rds/check_extra78 deleted file mode 100644 index ad8ec1bb..00000000 --- a/providers/aws/services/rds/check_extra78 +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env bash - -# Prowler - the handy cloud security tool (copyright 2018) by Toni de la Fuente -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -CHECK_ID_extra78="7.8" -CHECK_TITLE_extra78="[extra78] Ensure there are no Public Accessible RDS instances" -CHECK_SCORED_extra78="NOT_SCORED" -CHECK_CIS_LEVEL_extra78="EXTRA" -CHECK_SEVERITY_extra78="Critical" -CHECK_ASFF_RESOURCE_TYPE_extra78="AwsRdsDbInstance" -CHECK_ALTERNATE_extra708="extra78" -CHECK_ALTERNATE_check78="extra78" -CHECK_ALTERNATE_check708="extra78" -CHECK_SERVICENAME_extra78="rds" -CHECK_RISK_extra78='Publicly accessible databases could expose sensitive data to bad actors.' -CHECK_REMEDIATION_extra78='Using an AWS Config rule check for RDS public instances periodically and check there is a business reason for it.' 
-CHECK_DOC_extra78='https://docs.amazonaws.cn/en_us/config/latest/developerguide/rds-instance-public-access-check.html' -CHECK_CAF_EPIC_extra78='Data Protection' - -extra78(){ - # "Ensure there are no Public Accessible RDS instances " - for regx in $REGIONS; do - LIST_OF_RDS_PUBLIC_INSTANCES=$($AWSCLI rds describe-db-instances $PROFILE_OPT --region $regx --query 'DBInstances[?PubliclyAccessible==`true` && DBInstanceStatus==`"available"`].[DBInstanceIdentifier,Endpoint.Address]' --output text 2>&1) - if [[ $(echo "$LIST_OF_RDS_PUBLIC_INSTANCES" | grep AccessDenied) ]]; then - textInfo "$regx: Access Denied Trying to describe DB instances" "$regx" - continue - fi - if [[ $LIST_OF_RDS_PUBLIC_INSTANCES ]];then - while read -r rds_instance;do - RDS_NAME=$(echo $rds_instance | awk '{ print $1; }') - RDS_DNSNAME=$(echo $rds_instance | awk '{ print $2; }') - textFail "$regx: RDS instance: $RDS_NAME at $RDS_DNSNAME is set as Publicly Accessible!" "$regx" "$RDS_NAME" - done <<< "$LIST_OF_RDS_PUBLIC_INSTANCES" - else - textPass "$regx: no Publicly Accessible RDS instances found" "$regx" "$RDS_NAME" - fi - done -} diff --git a/providers/aws/services/rds/rds_client.py b/providers/aws/services/rds/rds_client.py new file mode 100644 index 00000000..d8462d27 --- /dev/null +++ b/providers/aws/services/rds/rds_client.py @@ -0,0 +1,4 @@ +from providers.aws.lib.audit_info.audit_info import current_audit_info +from providers.aws.services.rds.rds_service import RDS + +rds_client = RDS(current_audit_info) diff --git a/providers/aws/services/rds/rds_instance_backup_enabled/__init__.py b/providers/aws/services/rds/rds_instance_backup_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.metadata.json b/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.metadata.json new file mode 100644 index 00000000..2a607a48 --- /dev/null +++ 
b/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "aws", + "CheckID": "rds_instance_backup_enabled", + "CheckTitle": "Check if RDS instances have backup enabled.", + "CheckType": [], + "ServiceName": "rds", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:rds:region:account-id:db-instance", + "Severity": "medium", + "ResourceType": "AwsRdsDbInstance", + "Description": "Check if RDS instances have backup enabled.", + "Risk": "If backup is not enabled, data is vulnerable. Human error or bad actors could erase or modify data.", + "RelatedUrl": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html", + "Remediation": { + "Code": { + "CLI": "aws rds modify-db-instance --db-instance-identifier --backup-retention-period 7 --apply-immediately", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/RDS/rds-automated-backups-enabled.html", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-rds-instances-have-backup-policy#terraform" + }, + "Recommendation": { + "Text": "Enable automated backup for production data. Define a retention period and periodically test backup restoration. 
A Disaster Recovery process should be in place to govern Data Protection approach.", + "Url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "", + "Compliance": [] +} diff --git a/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.py b/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.py new file mode 100644 index 00000000..c3f86a0b --- /dev/null +++ b/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.py @@ -0,0 +1,23 @@ +from lib.check.models import Check, Check_Report +from providers.aws.services.rds.rds_client import rds_client + + +class rds_instance_backup_enabled(Check): + def execute(self): + findings = [] + for db_instance in rds_client.db_instances: + report = Check_Report(self.metadata) + report.region = db_instance.region + report.resource_id = db_instance.id + if db_instance.backup_retention_period > 0: + report.status = "PASS" + report.status_extended = f"RDS Instance {db_instance.id} has backup enabled with retention period {db_instance.backup_retention_period} days." + else: + report.status = "FAIL" + report.status_extended = ( + f"RDS Instance {db_instance.id} has not backup enabled." 
+ ) + + findings.append(report) + + return findings diff --git a/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled_test.py b/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled_test.py new file mode 100644 index 00000000..db4a21de --- /dev/null +++ b/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled_test.py @@ -0,0 +1,103 @@ +from re import search +from unittest import mock + +from boto3 import client +from moto import mock_rds + +AWS_REGION = "us-east-1" + + +class Test_rds_instance_backup_enabled: + @mock_rds + def test_rds_no_instances(self): + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_backup_enabled.rds_instance_backup_enabled.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_backup_enabled.rds_instance_backup_enabled import ( + rds_instance_backup_enabled, + ) + + check = rds_instance_backup_enabled() + result = check.execute() + + assert len(result) == 0 + + @mock_rds + def test_rds_instance_no_backup(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_backup_enabled.rds_instance_backup_enabled.rds_client", + new=RDS(current_audit_info), + ) as service_client: + # Test Check + from providers.aws.services.rds.rds_instance_backup_enabled.rds_instance_backup_enabled import ( + 
rds_instance_backup_enabled, + ) + + service_client.db_instances[0].backup_retention_period = 0 + + check = rds_instance_backup_enabled() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "has not backup enabled", + result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" + + @mock_rds + def test_rds_instance_with_backup(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + BackupRetentionPeriod=10, + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_backup_enabled.rds_instance_backup_enabled.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_backup_enabled.rds_instance_backup_enabled import ( + rds_instance_backup_enabled, + ) + + check = rds_instance_backup_enabled() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "has backup enabled", + result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" diff --git a/providers/aws/services/rds/rds_instance_deletion_protection/__init__.py b/providers/aws/services/rds/rds_instance_deletion_protection/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.metadata.json b/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.metadata.json new file mode 100644 index 00000000..997b0094 --- /dev/null +++ 
b/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "aws", + "CheckID": "rds_instance_deletion_protection", + "CheckTitle": "Check if RDS instances have deletion protection enabled.", + "CheckType": [], + "ServiceName": "rds", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:rds:region:account-id:db-instance", + "Severity": "medium", + "ResourceType": "AwsRdsDbInstance", + "Description": "Check if RDS instances have deletion protection enabled.", + "Risk": "You can only delete instances that do not have deletion protection enabled.", + "RelatedUrl": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html", + "Remediation": { + "Code": { + "CLI": "aws rds modify-db-instance --db-instance-identifier --deletion-protection --apply-immediately", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/RDS/instance-deletion-protection.html", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-rds-clusters-and-instances-have-deletion-protection-enabled#terraform" + }, + "Recommendation": { + "Text": "Enable deletion protection using the AWS Management Console for production DB instances.", + "Url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "", + "Compliance": [] +} diff --git a/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.py b/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.py new file mode 100644 index 00000000..02eefd78 --- /dev/null +++ b/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.py @@ -0,0 +1,25 @@ +from lib.check.models import Check, Check_Report +from 
providers.aws.services.rds.rds_client import rds_client + + +class rds_instance_deletion_protection(Check): + def execute(self): + findings = [] + for db_instance in rds_client.db_instances: + report = Check_Report(self.metadata) + report.region = db_instance.region + report.resource_id = db_instance.id + if db_instance.deletion_protection: + report.status = "PASS" + report.status_extended = ( + f"RDS Instance {db_instance.id} deletion protection is enabled." + ) + else: + report.status = "FAIL" + report.status_extended = ( + f"RDS Instance {db_instance.id} deletion protection is not enabled." + ) + + findings.append(report) + + return findings diff --git a/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection_test.py b/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection_test.py new file mode 100644 index 00000000..c38ae37d --- /dev/null +++ b/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection_test.py @@ -0,0 +1,101 @@ +from re import search +from unittest import mock + +from boto3 import client +from moto import mock_rds + +AWS_REGION = "us-east-1" + + +class Test_rds_instance_deletion_protection: + @mock_rds + def test_rds_no_instances(self): + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_deletion_protection.rds_instance_deletion_protection.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_deletion_protection.rds_instance_deletion_protection import ( + rds_instance_deletion_protection, + ) + + check = rds_instance_deletion_protection() + result = check.execute() + + assert len(result) == 0 + + @mock_rds + def test_rds_instance_no_deletion_protection(self): + conn = client("rds", 
region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_deletion_protection.rds_instance_deletion_protection.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_deletion_protection.rds_instance_deletion_protection import ( + rds_instance_deletion_protection, + ) + + check = rds_instance_deletion_protection() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "deletion protection is not enabled", + result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" + + @mock_rds + def test_rds_instance_with_deletion_protection(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + DeletionProtection=True, + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_deletion_protection.rds_instance_deletion_protection.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_deletion_protection.rds_instance_deletion_protection import ( + rds_instance_deletion_protection, + ) + + check = rds_instance_deletion_protection() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "deletion protection is enabled", 
result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" diff --git a/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/__init__.py b/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.metadata.json b/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.metadata.json new file mode 100644 index 00000000..7b4b26f1 --- /dev/null +++ b/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "aws", + "CheckID": "rds_instance_enhanced_monitoring_enabled", + "CheckTitle": "Check if RDS instances have enhanced monitoring enabled.", + "CheckType": [], + "ServiceName": "rds", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:rds:region:account-id:db-instance", + "Severity": "low", + "ResourceType": "AwsRdsDbInstance", + "Description": "Check if RDS instances have enhanced monitoring enabled.", + "Risk": "A smaller monitoring interval results in more frequent reporting of OS metrics.", + "RelatedUrl": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html", + "Remediation": { + "Code": { + "CLI": "aws rds modify-db-instance --db-instance-identifier --monitoring-interval 60 --monitoring-role-arn --apply-immediately", + "NativeIaC": "", + "Other": "", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-enhanced-monitoring-is-enabled-for-amazon-rds-instances#terraform" + }, + "Recommendation": { + "Text": "To use Enhanced Monitoring, you must create an IAM role, and then enable Enhanced Monitoring.", + "Url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": 
"value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "", + "Compliance": [] +} diff --git a/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.py b/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.py new file mode 100644 index 00000000..45b9c83e --- /dev/null +++ b/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.py @@ -0,0 +1,23 @@ +from lib.check.models import Check, Check_Report +from providers.aws.services.rds.rds_client import rds_client + + +class rds_instance_enhanced_monitoring_enabled(Check): + def execute(self): + findings = [] + for db_instance in rds_client.db_instances: + report = Check_Report(self.metadata) + report.region = db_instance.region + report.resource_id = db_instance.id + if db_instance.enhanced_monitoring_arn: + report.status = "PASS" + report.status_extended = ( + f"RDS Instance {db_instance.id} has enhanced monitoring enabled." + ) + else: + report.status = "FAIL" + report.status_extended = f"RDS Instance {db_instance.id} does not have enhanced monitoring enabled." 
+ + findings.append(report) + + return findings diff --git a/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled_test.py b/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled_test.py new file mode 100644 index 00000000..1081b9a2 --- /dev/null +++ b/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled_test.py @@ -0,0 +1,101 @@ +from re import search +from unittest import mock + +from boto3 import client +from moto import mock_rds + +AWS_REGION = "us-east-1" + + +class Test_rds_instance_enhanced_monitoring_enabled: + @mock_rds + def test_rds_no_instances(self): + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_enhanced_monitoring_enabled.rds_instance_enhanced_monitoring_enabled.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_enhanced_monitoring_enabled.rds_instance_enhanced_monitoring_enabled import ( + rds_instance_enhanced_monitoring_enabled, + ) + + check = rds_instance_enhanced_monitoring_enabled() + result = check.execute() + + assert len(result) == 0 + + @mock_rds + def test_rds_instance_no_monitoring(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + 
"providers.aws.services.rds.rds_instance_enhanced_monitoring_enabled.rds_instance_enhanced_monitoring_enabled.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_enhanced_monitoring_enabled.rds_instance_enhanced_monitoring_enabled import ( + rds_instance_enhanced_monitoring_enabled, + ) + + check = rds_instance_enhanced_monitoring_enabled() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "does not have enhanced monitoring enabled", + result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" + + @mock_rds + def test_rds_instance_with_monitoring(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_enhanced_monitoring_enabled.rds_instance_enhanced_monitoring_enabled.rds_client", + new=RDS(current_audit_info), + ) as service_client: + # Test Check + from providers.aws.services.rds.rds_instance_enhanced_monitoring_enabled.rds_instance_enhanced_monitoring_enabled import ( + rds_instance_enhanced_monitoring_enabled, + ) + + service_client.db_instances[0].enhanced_monitoring_arn = "log-stream" + check = rds_instance_enhanced_monitoring_enabled() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "has enhanced monitoring enabled", + result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" diff --git a/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/__init__.py 
b/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.metadata.json b/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.metadata.json new file mode 100644 index 00000000..c02facbc --- /dev/null +++ b/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "aws", + "CheckID": "rds_instance_integration_cloudwatch_logs", + "CheckTitle": "Check if RDS instances are integrated with CloudWatch Logs.", + "CheckType": [], + "ServiceName": "rds", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:rds:region:account-id:db-instance", + "Severity": "medium", + "ResourceType": "AwsRdsDbInstance", + "Description": "Check if RDS instances are integrated with CloudWatch Logs.", + "Risk": "If logs are not enabled, monitoring of service use and threat analysis is not possible.", + "RelatedUrl": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/publishing_cloudwatchlogs.html", + "Remediation": { + "Code": { + "CLI": "aws rds modify-db-instance --db-instance-identifier --cloudwatch-logs-export-configuration {'EnableLogTypes':['audit','error','general','slowquery']} --apply-immediately", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/RDS/log-exports.html", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-respective-logs-of-amazon-relational-database-service-amazon-rds-are-enabled#terraform" + }, + "Recommendation": { + "Text": "Use CloudWatch Logs to perform real-time analysis of the log data. 
Create alarms and view metrics.", + "Url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/publishing_cloudwatchlogs.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "", + "Compliance": [] +} diff --git a/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.py b/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.py new file mode 100644 index 00000000..4db543ab --- /dev/null +++ b/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.py @@ -0,0 +1,21 @@ +from lib.check.models import Check, Check_Report +from providers.aws.services.rds.rds_client import rds_client + + +class rds_instance_integration_cloudwatch_logs(Check): + def execute(self): + findings = [] + for db_instance in rds_client.db_instances: + report = Check_Report(self.metadata) + report.region = db_instance.region + report.resource_id = db_instance.id + if db_instance.cloudwatch_logs: + report.status = "PASS" + report.status_extended = f"RDS Instance {db_instance.id} is shipping {' '.join(db_instance.cloudwatch_logs)} to CloudWatch Logs." + else: + report.status = "FAIL" + report.status_extended = f"RDS Instance {db_instance.id} does not have CloudWatch Logs enabled." 
+ + findings.append(report) + + return findings diff --git a/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs_test.py b/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs_test.py new file mode 100644 index 00000000..3a2b4081 --- /dev/null +++ b/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs_test.py @@ -0,0 +1,101 @@ +from re import search +from unittest import mock + +from boto3 import client +from moto import mock_rds + +AWS_REGION = "us-east-1" + + +class Test_rds_instance_integration_cloudwatch_logs: + @mock_rds + def test_rds_no_instances(self): + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_integration_cloudwatch_logs.rds_instance_integration_cloudwatch_logs.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_integration_cloudwatch_logs.rds_instance_integration_cloudwatch_logs import ( + rds_instance_integration_cloudwatch_logs, + ) + + check = rds_instance_integration_cloudwatch_logs() + result = check.execute() + + assert len(result) == 0 + + @mock_rds + def test_rds_instance_no_logs(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_integration_cloudwatch_logs.rds_instance_integration_cloudwatch_logs.rds_client", + 
new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_integration_cloudwatch_logs.rds_instance_integration_cloudwatch_logs import ( + rds_instance_integration_cloudwatch_logs, + ) + + check = rds_instance_integration_cloudwatch_logs() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "does not have CloudWatch Logs enabled", + result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" + + @mock_rds + def test_rds_instance_with_logs(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + EnableCloudwatchLogsExports=["audit", "error"], + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_integration_cloudwatch_logs.rds_instance_integration_cloudwatch_logs.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_integration_cloudwatch_logs.rds_instance_integration_cloudwatch_logs import ( + rds_instance_integration_cloudwatch_logs, + ) + + check = rds_instance_integration_cloudwatch_logs() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "is shipping audit error to CloudWatch Logs", + result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" diff --git a/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/__init__.py b/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git 
a/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.metadata.json b/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.metadata.json new file mode 100644 index 00000000..8bf46ab8 --- /dev/null +++ b/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "aws", + "CheckID": "rds_instance_minor_version_upgrade_enabled", + "CheckTitle": "Ensure RDS instances have minor version upgrade enabled.", + "CheckType": [], + "ServiceName": "rds", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:rds:region:account-id:db-instance", + "Severity": "low", + "ResourceType": "AwsRdsDbInstance", + "Description": "Ensure RDS instances have minor version upgrade enabled.", + "Risk": "Auto Minor Version Upgrade is a feature that you can enable to have your database automatically upgraded when a new minor database engine version is available. 
Minor version upgrades often patch security vulnerabilities and fix bugs and therefore should be applied.", + "RelatedUrl": "https://aws.amazon.com/blogs/database/best-practices-for-upgrading-amazon-rds-to-major-and-minor-versions-of-postgresql/", + "Remediation": { + "Code": { + "CLI": "aws rds modify-db-instance --db-instance-identifier --auto-minor-version-upgrade --apply-immediately", + "NativeIaC": "https://docs.bridgecrew.io/docs/ensure-aws-db-instance-gets-all-minor-upgrades-automatically#cloudformation", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/RDS/rds-auto-minor-version-upgrade.html", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-aws-db-instance-gets-all-minor-upgrades-automatically#terraform" + }, + "Recommendation": { + "Text": "Enable auto minor version upgrade for all databases and environments.", + "Url": "https://aws.amazon.com/blogs/database/best-practices-for-upgrading-amazon-rds-to-major-and-minor-versions-of-postgresql/" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "", + "Compliance": [] +} diff --git a/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.py b/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.py new file mode 100644 index 00000000..4c22163b --- /dev/null +++ b/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.py @@ -0,0 +1,23 @@ +from lib.check.models import Check, Check_Report +from providers.aws.services.rds.rds_client import rds_client + + +class rds_instance_minor_version_upgrade_enabled(Check): + def execute(self): + findings = [] + for db_instance in rds_client.db_instances: + report = Check_Report(self.metadata) + report.region = db_instance.region + report.resource_id = db_instance.id + if 
db_instance.auto_minor_version_upgrade: + report.status = "PASS" + report.status_extended = ( + f"RDS Instance {db_instance.id} has minor version upgrade enabled." + ) + else: + report.status = "FAIL" + report.status_extended = f"RDS Instance {db_instance.id} does not have minor version upgrade enabled." + + findings.append(report) + + return findings diff --git a/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled_test.py b/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled_test.py new file mode 100644 index 00000000..7bc4d2d4 --- /dev/null +++ b/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled_test.py @@ -0,0 +1,101 @@ +from re import search +from unittest import mock + +from boto3 import client +from moto import mock_rds + +AWS_REGION = "us-east-1" + + +class Test_rds_instance_minor_version_upgrade_enabled: + @mock_rds + def test_rds_no_instances(self): + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_minor_version_upgrade_enabled.rds_instance_minor_version_upgrade_enabled.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_minor_version_upgrade_enabled.rds_instance_minor_version_upgrade_enabled import ( + rds_instance_minor_version_upgrade_enabled, + ) + + check = rds_instance_minor_version_upgrade_enabled() + result = check.execute() + + assert len(result) == 0 + + @mock_rds + def test_rds_instance_no_auto_upgrade(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + 
DBInstanceClass="db.m1.small", + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_minor_version_upgrade_enabled.rds_instance_minor_version_upgrade_enabled.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_minor_version_upgrade_enabled.rds_instance_minor_version_upgrade_enabled import ( + rds_instance_minor_version_upgrade_enabled, + ) + + check = rds_instance_minor_version_upgrade_enabled() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "does not have minor version upgrade enabled", + result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" + + @mock_rds + def test_rds_instance_with_auto_upgrade(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + AutoMinorVersionUpgrade=True, + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_minor_version_upgrade_enabled.rds_instance_minor_version_upgrade_enabled.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_minor_version_upgrade_enabled.rds_instance_minor_version_upgrade_enabled import ( + rds_instance_minor_version_upgrade_enabled, + ) + + check = rds_instance_minor_version_upgrade_enabled() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "has minor version upgrade enabled", + result[0].status_extended, + 
) + assert result[0].resource_id == "db-master-1" diff --git a/providers/aws/services/rds/rds_instance_multi_az/__init__.py b/providers/aws/services/rds/rds_instance_multi_az/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.metadata.json b/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.metadata.json new file mode 100644 index 00000000..12f5ccb5 --- /dev/null +++ b/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "aws", + "CheckID": "rds_instance_multi_az", + "CheckTitle": "Check if RDS instances have multi-AZ enabled.", + "CheckType": [], + "ServiceName": "rds", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:rds:region:account-id:db-instance", + "Severity": "medium", + "ResourceType": "AwsRdsDbInstance", + "Description": "Check if RDS instances have multi-AZ enabled.", + "Risk": "In case of failure, with a single-AZ deployment configuration, should an availability zone specific database failure occur, Amazon RDS can not automatically fail over to the standby availability zone.", + "RelatedUrl": "https://aws.amazon.com/rds/features/multi-az/", + "Remediation": { + "Code": { + "CLI": "aws rds create-db-instance --db-instance-identifier --multi-az true", + "NativeIaC": "https://docs.bridgecrew.io/docs/general_73#cloudformation", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/RDS/rds-multi-az.html", + "Terraform": "https://docs.bridgecrew.io/docs/general_73#terraform" + }, + "Recommendation": { + "Text": "Enable multi-AZ deployment for production databases.", + "Url": "https://aws.amazon.com/rds/features/multi-az/" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "", + "Compliance": [] +} diff --git 
a/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.py b/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.py new file mode 100644 index 00000000..0b77f0c2 --- /dev/null +++ b/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.py @@ -0,0 +1,25 @@ +from lib.check.models import Check, Check_Report +from providers.aws.services.rds.rds_client import rds_client + + +class rds_instance_multi_az(Check): + def execute(self): + findings = [] + for db_instance in rds_client.db_instances: + report = Check_Report(self.metadata) + report.region = db_instance.region + report.resource_id = db_instance.id + if db_instance.multi_az: + report.status = "PASS" + report.status_extended = ( + f"RDS Instance {db_instance.id} has multi-AZ enabled." + ) + else: + report.status = "FAIL" + report.status_extended = ( + f"RDS Instance {db_instance.id} does not have multi-AZ enabled." + ) + + findings.append(report) + + return findings diff --git a/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az_test.py b/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az_test.py new file mode 100644 index 00000000..5168963c --- /dev/null +++ b/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az_test.py @@ -0,0 +1,101 @@ +from re import search +from unittest import mock + +from boto3 import client +from moto import mock_rds + +AWS_REGION = "us-east-1" + + +class Test_rds_instance_multi_az: + @mock_rds + def test_rds_no_instances(self): + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_multi_az.rds_instance_multi_az.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_multi_az.rds_instance_multi_az import ( + rds_instance_multi_az, + ) + + 
check = rds_instance_multi_az() + result = check.execute() + + assert len(result) == 0 + + @mock_rds + def test_rds_instance_no_multi_az(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_multi_az.rds_instance_multi_az.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_multi_az.rds_instance_multi_az import ( + rds_instance_multi_az, + ) + + check = rds_instance_multi_az() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "does not have multi-AZ enabled", + result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" + + @mock_rds + def test_rds_instance_multi_az(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + MultiAZ=True, + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_multi_az.rds_instance_multi_az.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_multi_az.rds_instance_multi_az import ( + rds_instance_multi_az, + ) + + check = rds_instance_multi_az() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "has multi-AZ enabled", + 
result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" diff --git a/providers/aws/services/rds/rds_instance_no_public_access/__init__.py b/providers/aws/services/rds/rds_instance_no_public_access/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.metadata.json b/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.metadata.json new file mode 100644 index 00000000..57a76214 --- /dev/null +++ b/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "aws", + "CheckID": "rds_instance_no_public_access", + "CheckTitle": "Ensure there are no Public Accessible RDS instances.", + "CheckType": [], + "ServiceName": "rds", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:rds:region:account-id:db-instance", + "Severity": "critical", + "ResourceType": "AwsRdsDbInstance", + "Description": "Ensure there are no Public Accessible RDS instances.", + "Risk": "Publicly accessible databases could expose sensitive data to bad actors.", + "RelatedUrl": "https://docs.amazonaws.cn/en_us/config/latest/developerguide/rds-instance-public-access-check.html", + "Remediation": { + "Code": { + "CLI": "aws rds modify-db-instance --db-instance-identifier --no-publicly-accessible --apply-immediately", + "NativeIaC": "https://docs.bridgecrew.io/docs/public_2#cloudformation", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/RDS/rds-publicly-accessible.html", + "Terraform": "https://docs.bridgecrew.io/docs/public_2#terraform" + }, + "Recommendation": { + "Text": "Using an AWS Config rule check for RDS public instances periodically and check there is a business reason for it.", + "Url": "https://docs.amazonaws.cn/en_us/config/latest/developerguide/rds-instance-public-access-check.html" + } + }, + "Categories": [], + "Tags": { + 
"Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "", + "Compliance": [] +} diff --git a/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.py b/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.py new file mode 100644 index 00000000..b1113bea --- /dev/null +++ b/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.py @@ -0,0 +1,25 @@ +from lib.check.models import Check, Check_Report +from providers.aws.services.rds.rds_client import rds_client + + +class rds_instance_no_public_access(Check): + def execute(self): + findings = [] + for db_instance in rds_client.db_instances: + report = Check_Report(self.metadata) + report.region = db_instance.region + report.resource_id = db_instance.id + if not db_instance.public: + report.status = "PASS" + report.status_extended = ( + f"RDS Instance {db_instance.id} is not Publicly Accessible." + ) + else: + report.status = "FAIL" + report.status_extended = ( + f"RDS Instance {db_instance.id} is set as Publicly Accessible." 
+ ) + + findings.append(report) + + return findings diff --git a/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access_test.py b/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access_test.py new file mode 100644 index 00000000..d9b678cb --- /dev/null +++ b/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access_test.py @@ -0,0 +1,101 @@ +from re import search +from unittest import mock + +from boto3 import client +from moto import mock_rds + +AWS_REGION = "us-east-1" + + +class Test_rds_instance_no_public_access: + @mock_rds + def test_rds_no_instances(self): + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_no_public_access.rds_instance_no_public_access.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_no_public_access.rds_instance_no_public_access import ( + rds_instance_no_public_access, + ) + + check = rds_instance_no_public_access() + result = check.execute() + + assert len(result) == 0 + + @mock_rds + def test_rds_instance_private(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_no_public_access.rds_instance_no_public_access.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_no_public_access.rds_instance_no_public_access import ( + 
rds_instance_no_public_access, + ) + + check = rds_instance_no_public_access() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "is not Publicly Accessible", + result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" + + @mock_rds + def test_rds_instance_public(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + PubliclyAccessible=True, + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_no_public_access.rds_instance_no_public_access.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_no_public_access.rds_instance_no_public_access import ( + rds_instance_no_public_access, + ) + + check = rds_instance_no_public_access() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "is set as Publicly Accessible", + result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" diff --git a/providers/aws/services/rds/rds_instance_storage_encrypted/__init__.py b/providers/aws/services/rds/rds_instance_storage_encrypted/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.metadata.json b/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.metadata.json new file mode 100644 index 00000000..17b7933c --- /dev/null +++ b/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "aws", + 
"CheckID": "rds_instance_storage_encrypted", + "CheckTitle": "Check if RDS instances storage is encrypted.", + "CheckType": [], + "ServiceName": "rds", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:rds:region:account-id:db-instance", + "Severity": "medium", + "ResourceType": "AwsRdsDbInstance", + "Description": "Check if RDS instances storage is encrypted.", + "Risk": "If not enabled sensitive information at rest is not protected.", + "RelatedUrl": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html", + "Remediation": { + "Code": { + "CLI": "aws rds create-db-instance --db-instance-identifier --db-instance-class --engine --storage-encrypted true", + "NativeIaC": "https://docs.bridgecrew.io/docs/general_4#cloudformation", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/RDS/rds-encryption-enabled.html", + "Terraform": "https://docs.bridgecrew.io/docs/general_4#terraform" + }, + "Recommendation": { + "Text": "Enable Encryption. Use a CMK where possible. 
It will provide additional management and privacy benefits.", + "Url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "", + "Compliance": [] +} diff --git a/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.py b/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.py new file mode 100644 index 00000000..7cea76cc --- /dev/null +++ b/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.py @@ -0,0 +1,23 @@ +from lib.check.models import Check, Check_Report +from providers.aws.services.rds.rds_client import rds_client + + +class rds_instance_storage_encrypted(Check): + def execute(self): + findings = [] + for db_instance in rds_client.db_instances: + report = Check_Report(self.metadata) + report.region = db_instance.region + report.resource_id = db_instance.id + if db_instance.encrypted: + report.status = "PASS" + report.status_extended = f"RDS Instance {db_instance.id} is encrypted." + else: + report.status = "FAIL" + report.status_extended = ( + f"RDS Instance {db_instance.id} is not encrypted." 
+ ) + + findings.append(report) + + return findings diff --git a/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted_test.py b/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted_test.py new file mode 100644 index 00000000..7d3bae30 --- /dev/null +++ b/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted_test.py @@ -0,0 +1,101 @@ +from re import search +from unittest import mock + +from boto3 import client +from moto import mock_rds + +AWS_REGION = "us-east-1" + + +class Test_rds_instance_storage_encrypted: + @mock_rds + def test_rds_no_instances(self): + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_storage_encrypted.rds_instance_storage_encrypted.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_storage_encrypted.rds_instance_storage_encrypted import ( + rds_instance_storage_encrypted, + ) + + check = rds_instance_storage_encrypted() + result = check.execute() + + assert len(result) == 0 + + @mock_rds + def test_rds_instance_no_encryption(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_storage_encrypted.rds_instance_storage_encrypted.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from 
providers.aws.services.rds.rds_instance_storage_encrypted.rds_instance_storage_encrypted import ( + rds_instance_storage_encrypted, + ) + + check = rds_instance_storage_encrypted() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "is not encrypted", + result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" + + @mock_rds + def test_rds_instance_with_encryption(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + StorageEncrypted=True, + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_instance_storage_encrypted.rds_instance_storage_encrypted.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_instance_storage_encrypted.rds_instance_storage_encrypted import ( + rds_instance_storage_encrypted, + ) + + check = rds_instance_storage_encrypted() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "is encrypted", + result[0].status_extended, + ) + assert result[0].resource_id == "db-master-1" diff --git a/providers/aws/services/rds/rds_service.py b/providers/aws/services/rds/rds_service.py new file mode 100644 index 00000000..31402e67 --- /dev/null +++ b/providers/aws/services/rds/rds_service.py @@ -0,0 +1,176 @@ +import threading +from typing import Optional + +from pydantic import BaseModel + +from lib.logger import logger +from providers.aws.aws_provider import generate_regional_clients + + +################## RDS +class RDS: + def __init__(self, audit_info): + self.service = "rds" + self.session = audit_info.audit_session + 
self.audited_account = audit_info.audited_account + self.regional_clients = generate_regional_clients(self.service, audit_info) + self.db_instances = [] + self.db_snapshots = [] + self.db_cluster_snapshots = [] + self.__threading_call__(self.__describe_db_instances__) + self.__threading_call__(self.__describe_db_snapshots__) + self.__threading_call__(self.__describe_db_snapshot_attributes__) + self.__threading_call__(self.__describe_db_cluster_snapshots__) + self.__threading_call__(self.__describe_db_cluster_snapshot_attributes__) + + def __get_session__(self): + return self.session + + def __threading_call__(self, call): + threads = [] + for regional_client in self.regional_clients.values(): + threads.append(threading.Thread(target=call, args=(regional_client,))) + for t in threads: + t.start() + for t in threads: + t.join() + + def __describe_db_instances__(self, regional_client): + logger.info("RDS - Describe Instances...") + try: + describe_db_instances_paginator = regional_client.get_paginator( + "describe_db_instances" + ) + for page in describe_db_instances_paginator.paginate(): + for instance in page["DBInstances"]: + self.db_instances.append( + DBInstance( + id=instance["DBInstanceIdentifier"], + endpoint=instance["Endpoint"]["Address"], + status=instance["DBInstanceStatus"], + public=instance["PubliclyAccessible"], + encrypted=instance["StorageEncrypted"], + auto_minor_version_upgrade=instance[ + "AutoMinorVersionUpgrade" + ], + backup_retention_period=instance.get( + "BackupRetentionPeriod" + ), + cloudwatch_logs=instance.get( + "EnabledCloudwatchLogsExports" + ), + deletion_protection=instance["DeletionProtection"], + enhanced_monitoring_arn=instance.get( + "EnhancedMonitoringResourceArn" + ), + multi_az=instance["MultiAZ"], + region=regional_client.region, + ) + ) + except Exception as error: + logger.error( + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + + def __describe_db_snapshots__(self, 
regional_client): + logger.info("RDS - Describe Snapshots...") + try: + describe_db_snapshots_paginator = regional_client.get_paginator( + "describe_db_snapshots" + ) + for page in describe_db_snapshots_paginator.paginate(): + for snapshot in page["DBSnapshots"]: + self.db_snapshots.append( + DBSnapshot( + id=snapshot["DBSnapshotIdentifier"], + instance_id=snapshot["DBInstanceIdentifier"], + region=regional_client.region, + ) + ) + except Exception as error: + logger.error( + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + + def __describe_db_snapshot_attributes__(self, regional_client): + logger.info("RDS - Describe Snapshot Attributes...") + try: + for snapshot in self.db_snapshots: + if snapshot.region == regional_client.region: + response = regional_client.describe_db_snapshot_attributes( + DBSnapshotIdentifier=snapshot.id + )["DBSnapshotAttributesResult"] + for att in response["DBSnapshotAttributes"]: + if "all" in att["AttributeValues"]: + snapshot.public = True + + except Exception as error: + logger.error( + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + + def __describe_db_cluster_snapshots__(self, regional_client): + logger.info("RDS - Describe Cluster Snapshots...") + try: + describe_db_snapshots_paginator = regional_client.get_paginator( + "describe_db_cluster_snapshots" + ) + for page in describe_db_snapshots_paginator.paginate(): + for snapshot in page["DBClusterSnapshots"]: + self.db_cluster_snapshots.append( + ClusterSnapshot( + id=snapshot["DBClusterSnapshotIdentifier"], + cluster_id=snapshot["DBClusterIdentifier"], + region=regional_client.region, + ) + ) + except Exception as error: + logger.error( + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + + def __describe_db_cluster_snapshot_attributes__(self, regional_client): + logger.info("RDS - Describe Cluster Snapshot 
Attributes...") + try: + for snapshot in self.db_cluster_snapshots: + if snapshot.region == regional_client.region: + response = regional_client.describe_db_cluster_snapshot_attributes( + DBClusterSnapshotIdentifier=snapshot.id + )["DBClusterSnapshotAttributesResult"] + for att in response["DBClusterSnapshotAttributes"]: + if "all" in att["AttributeValues"]: + snapshot.public = True + + except Exception as error: + logger.error( + f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + + +class DBInstance(BaseModel): + id: str + endpoint: str + status: str + public: bool + encrypted: bool + backup_retention_period: int = 0 + cloudwatch_logs: Optional[list] + deletion_protection: bool + auto_minor_version_upgrade: bool + enhanced_monitoring_arn: Optional[str] + multi_az: bool + region: str + + +class DBSnapshot(BaseModel): + id: str + instance_id: str + public: bool = False + region: str + + +class ClusterSnapshot(BaseModel): + id: str + cluster_id: str + public: bool = False + region: str diff --git a/providers/aws/services/rds/rds_service_test.py b/providers/aws/services/rds/rds_service_test.py new file mode 100644 index 00000000..409479db --- /dev/null +++ b/providers/aws/services/rds/rds_service_test.py @@ -0,0 +1,150 @@ +from boto3 import client, session +from moto import mock_rds + +from providers.aws.lib.audit_info.models import AWS_Audit_Info +from providers.aws.services.rds.rds_service import RDS + +AWS_ACCOUNT_NUMBER = 123456789012 +AWS_REGION = "us-east-1" + + +class Test_RDS_Service: + # Mocked Audit Info + def set_mocked_audit_info(self): + audit_info = AWS_Audit_Info( + original_session=None, + audit_session=session.Session( + profile_name=None, + botocore_session=None, + ), + audited_account=AWS_ACCOUNT_NUMBER, + audited_user_id=None, + audited_partition="aws", + audited_identity_arn=None, + profile=None, + profile_region=None, + credentials=None, + assumed_role_info=None, + audited_regions=None, + 
organizations_metadata=None, + ) + return audit_info + + # Test Dynamo Service + @mock_rds + def test_service(self): + # Dynamo client for this test class + audit_info = self.set_mocked_audit_info() + rds = RDS(audit_info) + assert rds.service == "rds" + + # Test Dynamo Client + @mock_rds + def test_client(self): + # Dynamo client for this test class + audit_info = self.set_mocked_audit_info() + rds = RDS(audit_info) + for regional_client in rds.regional_clients.values(): + assert regional_client.__class__.__name__ == "RDS" + + # Test Dynamo Session + @mock_rds + def test__get_session__(self): + # Dynamo client for this test class + audit_info = self.set_mocked_audit_info() + rds = RDS(audit_info) + assert rds.session.__class__.__name__ == "Session" + + # Test Dynamo Session + @mock_rds + def test_audited_account(self): + # Dynamo client for this test class + audit_info = self.set_mocked_audit_info() + rds = RDS(audit_info) + assert rds.audited_account == AWS_ACCOUNT_NUMBER + + # Test RDS Describe DB Instances + @mock_rds + def test__describe_db_instances__(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + StorageEncrypted=True, + DeletionProtection=True, + PubliclyAccessible=True, + AutoMinorVersionUpgrade=True, + BackupRetentionPeriod=10, + EnableCloudwatchLogsExports=["audit", "error"], + MultiAZ=True, + ) + # RDS client for this test class + audit_info = self.set_mocked_audit_info() + rds = RDS(audit_info) + assert len(rds.db_instances) == 1 + assert rds.db_instances[0].id == "db-master-1" + assert rds.db_instances[0].region == AWS_REGION + assert ( + rds.db_instances[0].endpoint + == "db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com" + ) + assert rds.db_instances[0].status == "available" + assert rds.db_instances[0].public + assert rds.db_instances[0].encrypted + assert 
rds.db_instances[0].backup_retention_period == 10 + assert rds.db_instances[0].cloudwatch_logs == ["audit", "error"] + assert rds.db_instances[0].deletion_protection + assert rds.db_instances[0].auto_minor_version_upgrade + assert rds.db_instances[0].multi_az + + # Test RDS Describe DB Snapshots + @mock_rds + def test__describe_db_snapshots__(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + ) + + conn.create_db_snapshot( + DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1" + ) + # RDS client for this test class + audit_info = self.set_mocked_audit_info() + rds = RDS(audit_info) + assert len(rds.db_snapshots) == 1 + assert rds.db_snapshots[0].id == "snapshot-1" + assert rds.db_snapshots[0].instance_id == "db-primary-1" + assert rds.db_snapshots[0].region == AWS_REGION + assert not rds.db_snapshots[0].public + + # Test RDS Describe DB Cluster Snapshots + @mock_rds + def test__describe_db_cluster_snapshots__(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_cluster( + DBClusterIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DBClusterInstanceClass="db.m1.small", + MasterUsername="root", + MasterUserPassword="hunter2000", + ) + + conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1" + ) + # RDS client for this test class + audit_info = self.set_mocked_audit_info() + rds = RDS(audit_info) + assert len(rds.db_cluster_snapshots) == 1 + assert rds.db_cluster_snapshots[0].id == "snapshot-1" + assert rds.db_cluster_snapshots[0].cluster_id == "db-primary-1" + assert rds.db_cluster_snapshots[0].region == AWS_REGION + assert not rds.db_cluster_snapshots[0].public diff --git a/providers/aws/services/rds/rds_snapshots_public_access/__init__.py 
b/providers/aws/services/rds/rds_snapshots_public_access/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.metadata.json b/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.metadata.json new file mode 100644 index 00000000..13fd6f4f --- /dev/null +++ b/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "aws", + "CheckID": "rds_snapshots_public_access", + "CheckTitle": "Check if RDS Snapshots and Cluster Snapshots are public.", + "CheckType": [], + "ServiceName": "rds", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:rds:region:account-id:snapshot", + "Severity": "critical", + "ResourceType": "AwsRdsDbSnapshot", + "Description": "Check if RDS Snapshots and Cluster Snapshots are public.", + "Risk": "Publicly accessible services could expose sensitive data to bad actors. It is recommended that your RDS snapshots should not be public in order to prevent potential leak or misuse of sensitive data or any other kind of security threat. 
If your RDS snapshot is public, then the data which is backed up in that snapshot is accessible to all other AWS accounts.", + "RelatedUrl": "https://docs.aws.amazon.com/config/latest/developerguide/rds-snapshots-public-prohibited.html", + "Remediation": { + "Code": { + "CLI": "aws rds modify-db-snapshot-attribute --db-snapshot-identifier --attribute-name restore --values-to-remove all", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/RDS/public-snapshots.html", + "Terraform": "" + }, + "Recommendation": { + "Text": "Use AWS Config to identify any snapshot that is public.", + "Url": "https://docs.aws.amazon.com/config/latest/developerguide/rds-snapshots-public-prohibited.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "", + "Compliance": [] +} diff --git a/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.py b/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.py new file mode 100644 index 00000000..362ea3af --- /dev/null +++ b/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.py @@ -0,0 +1,40 @@ +from lib.check.models import Check, Check_Report +from providers.aws.services.rds.rds_client import rds_client + + +class rds_snapshots_public_access(Check): + def execute(self): + findings = [] + for db_snap in rds_client.db_snapshots: + report = Check_Report(self.metadata) + report.region = db_snap.region + report.resource_id = db_snap.id + if db_snap.public: + report.status = "FAIL" + report.status_extended = ( + f"RDS Instance Snapshot {db_snap.id} is public." + ) + else: + report.status = "PASS" + report.status_extended = ( + f"RDS Instance Snapshot {db_snap.id} is not shared." 
+ ) + + findings.append(report) + + for db_snap in rds_client.db_cluster_snapshots: + report = Check_Report(self.metadata) + report.region = db_snap.region + report.resource_id = db_snap.id + if db_snap.public: + report.status = "FAIL" + report.status_extended = f"RDS Cluster Snapshot {db_snap.id} is public." + else: + report.status = "PASS" + report.status_extended = ( + f"RDS Cluster Snapshot {db_snap.id} is not shared." + ) + + findings.append(report) + + return findings diff --git a/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access_test.py b/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access_test.py new file mode 100644 index 00000000..78227a71 --- /dev/null +++ b/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access_test.py @@ -0,0 +1,190 @@ +from re import search +from unittest import mock + +from boto3 import client +from moto import mock_rds + +AWS_REGION = "us-east-1" + + +class Test_rds_snapshots_public_access: + @mock_rds + def test_rds_no_snapshots(self): + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_snapshots_public_access.rds_snapshots_public_access.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_snapshots_public_access.rds_snapshots_public_access import ( + rds_snapshots_public_access, + ) + + check = rds_snapshots_public_access() + result = check.execute() + + assert len(result) == 0 + + @mock_rds + def test_rds_private_snapshot(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + ) + + conn.create_db_snapshot( + 
DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1" + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_snapshots_public_access.rds_snapshots_public_access.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_snapshots_public_access.rds_snapshots_public_access import ( + rds_snapshots_public_access, + ) + + check = rds_snapshots_public_access() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "is not shared", + result[0].status_extended, + ) + assert result[0].resource_id == "snapshot-1" + + @mock_rds + def test_rds_public_snapshot(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_instance( + DBInstanceIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + ) + + conn.create_db_snapshot( + DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1" + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_snapshots_public_access.rds_snapshots_public_access.rds_client", + new=RDS(current_audit_info), + ) as service_client: + # Test Check + from providers.aws.services.rds.rds_snapshots_public_access.rds_snapshots_public_access import ( + rds_snapshots_public_access, + ) + + service_client.db_snapshots[0].public = True + check = rds_snapshots_public_access() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "is public", + result[0].status_extended, + ) + assert result[0].resource_id == "snapshot-1" + + @mock_rds + 
def test_rds_cluster_private_snapshot(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_cluster( + DBClusterIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DBClusterInstanceClass="db.m1.small", + MasterUsername="root", + MasterUserPassword="hunter2000", + ) + + conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1" + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_snapshots_public_access.rds_snapshots_public_access.rds_client", + new=RDS(current_audit_info), + ): + # Test Check + from providers.aws.services.rds.rds_snapshots_public_access.rds_snapshots_public_access import ( + rds_snapshots_public_access, + ) + + check = rds_snapshots_public_access() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "is not shared", + result[0].status_extended, + ) + assert result[0].resource_id == "snapshot-1" + + @mock_rds + def test_rds_cluster_public_snapshot(self): + conn = client("rds", region_name=AWS_REGION) + conn.create_db_cluster( + DBClusterIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DBClusterInstanceClass="db.m1.small", + MasterUsername="root", + MasterUserPassword="hunter2000", + ) + + conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1" + ) + from providers.aws.lib.audit_info.audit_info import current_audit_info + from providers.aws.services.rds.rds_service import RDS + + current_audit_info.audited_partition = "aws" + + with mock.patch( + "providers.aws.services.rds.rds_snapshots_public_access.rds_snapshots_public_access.rds_client", + new=RDS(current_audit_info), + ) as service_client: + # Test Check + from 
providers.aws.services.rds.rds_snapshots_public_access.rds_snapshots_public_access import ( + rds_snapshots_public_access, + ) + + service_client.db_cluster_snapshots[0].public = True + check = rds_snapshots_public_access() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "is public", + result[0].status_extended, + ) + assert result[0].resource_id == "snapshot-1" diff --git a/providers/aws/services/s3/s3_account_level_public_access_blocks/__init__.py b/providers/aws/services/s3/s3_account_level_public_access_blocks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks.metadata.json b/providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks.metadata.json new file mode 100644 index 00000000..5efea66d --- /dev/null +++ b/providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks.metadata.json @@ -0,0 +1,37 @@ +{ + "Provider": "aws", + "CheckID": "s3_account_level_public_access_blocks", + "CheckTitle": "Check S3 Account Level Public Access Block.", + "CheckType": [ + "Data Protection" + ], + "ServiceName": "s3", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "high", + "ResourceType": "AwsS3Bucket", + "Description": "Check S3 Account Level Public Access Block.", + "Risk": "Public access policies may be applied to sensitive data buckets.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws s3control put-public-access-block --public-access-block-configuration BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true --account-id ", + "NativeIaC": "https://docs.bridgecrew.io/docs/bc_aws_s3_21#cloudformation", + "Other": 
from lib.check.models import Check, Check_Report
from providers.aws.services.s3.s3_client import s3_client
from providers.aws.services.s3.s3control_client import s3control_client


class s3_account_level_public_access_blocks(Check):
    """Check whether account-wide S3 Block Public Access is in effect."""

    def execute(self):
        # Exactly one account-level finding is produced. Start from FAIL and
        # flip to PASS only when the account-wide Public Access Block both
        # ignores existing public ACLs and restricts public bucket policies
        # (the same two flags s3_bucket_public_access treats as decisive).
        report = Check_Report(self.metadata)
        report.region = s3control_client.region
        report.resource_id = s3_client.audited_account
        report.status = "FAIL"
        report.status_extended = f"Block Public Access is not configured for the account {s3_client.audited_account}."

        pab = s3control_client.account_public_access_block
        # `pab` may be None when no account-level configuration exists.
        if pab and pab.ignore_public_acls and pab.restrict_public_buckets:
            report.status = "PASS"
            report.status_extended = f"Block Public Access is configured for the account {s3_client.audited_account}."

        return [report]
"providers.aws.services.s3.s3_account_level_public_access_blocks.s3_account_level_public_access_blocks.s3_client", + new=S3(audit_info), + ): + with mock.patch( + "providers.aws.services.s3.s3_account_level_public_access_blocks.s3_account_level_public_access_blocks.s3control_client", + new=S3Control(audit_info), + ): + # Test Check + from providers.aws.services.s3.s3_account_level_public_access_blocks.s3_account_level_public_access_blocks import ( + s3_account_level_public_access_blocks, + ) + + check = s3_account_level_public_access_blocks() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert ( + result[0].status_extended + == f"Block Public Access is configured for the account {AWS_ACCOUNT_NUMBER}." + ) + assert result[0].resource_id == AWS_ACCOUNT_NUMBER + assert result[0].region == AWS_REGION + + @mock_s3 + @mock_s3control + def test_bucket_without_account_public_block(self): + # Generate S3Control Client + s3control_client = client("s3control", region_name=AWS_REGION) + s3control_client.put_public_access_block( + AccountId=AWS_ACCOUNT_NUMBER, + PublicAccessBlockConfiguration={ + "BlockPublicAcls": False, + "IgnorePublicAcls": False, + "BlockPublicPolicy": False, + "RestrictPublicBuckets": False, + }, + ) + from providers.aws.services.s3.s3_service import S3, S3Control + + audit_info = self.set_mocked_audit_info() + + with mock.patch( + "providers.aws.lib.audit_info.audit_info.current_audit_info", new=audit_info + ): + with mock.patch( + "providers.aws.services.s3.s3_account_level_public_access_blocks.s3_account_level_public_access_blocks.s3_client", + new=S3(audit_info), + ): + with mock.patch( + "providers.aws.services.s3.s3_account_level_public_access_blocks.s3_account_level_public_access_blocks.s3control_client", + new=S3Control(audit_info), + ): + # Test Check + from providers.aws.services.s3.s3_account_level_public_access_blocks.s3_account_level_public_access_blocks import ( + 
s3_account_level_public_access_blocks, + ) + + check = s3_account_level_public_access_blocks() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert ( + result[0].status_extended + == f"Block Public Access is not configured for the account {AWS_ACCOUNT_NUMBER}." + ) + assert result[0].resource_id == AWS_ACCOUNT_NUMBER + assert result[0].region == AWS_REGION diff --git a/providers/aws/services/s3/s3_bucket_public_access/s3_bucket_public_access.py b/providers/aws/services/s3/s3_bucket_public_access/s3_bucket_public_access.py index 60412557..d8c3064a 100644 --- a/providers/aws/services/s3/s3_bucket_public_access/s3_bucket_public_access.py +++ b/providers/aws/services/s3/s3_bucket_public_access/s3_bucket_public_access.py @@ -8,7 +8,8 @@ class s3_bucket_public_access(Check): findings = [] # 1. Check if public buckets are restricted at account level if ( - s3control_client.account_public_access_block.ignore_public_acls + s3control_client.account_public_access_block + and s3control_client.account_public_access_block.ignore_public_acls and s3control_client.account_public_access_block.restrict_public_buckets ): report = Check_Report(self.metadata) diff --git a/prowler b/prowler index ca219eab..03209b5f 100755 --- a/prowler +++ b/prowler @@ -330,6 +330,7 @@ if __name__ == "__main__": ) # Execute checks + findings = [] if len(checks_to_execute): findings = execute_checks( checks_to_execute, provider, audit_info, audit_output_options