feat(ignore unused services): add --ignore-unused-services argument to ignore findings from services not in actual use (#2936)

Commit: b822c19d2c (parent 2aa3126eb0)
Author: Sergio Garcia
Date: 2023-10-24 14:09:27 +02:00
Committed by: GitHub
89 changed files with 4587 additions and 1157 deletions
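
Usage note (an assumption inferred from the argument added to the AWS parser below, not text from this commit): the new flag is passed on the AWS provider command line, e.g. prowler aws --ignore-unused-services. Because it is a store_true argument defaulting to False, existing scans keep reporting every finding unless the flag is explicitly set.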

View File

@@ -153,6 +153,16 @@ def init_parser(self):
help="Set the maximum attemps for the Boto3 standard retrier config (Default: 3)",
)
# Ignore Unused Services
ignore_unused_services_subparser = aws_parser.add_argument_group(
"Ignore Unused Services"
)
ignore_unused_services_subparser.add_argument(
"--ignore-unused-services",
action="store_true",
help="Ignore findings in unused services",
)
def validate_session_duration(duration):
"""validate_session_duration validates that the AWS STS Assume Role Session Duration is between 900 and 43200 seconds."""

View File

@@ -37,4 +37,5 @@ current_audit_info = AWS_Audit_Info(
organizations_metadata=None,
audit_metadata=None,
audit_config=None,
ignore_unused_services=False,
)

View File

@@ -52,3 +52,4 @@ class AWS_Audit_Info:
organizations_metadata: AWS_Organizations_Info
audit_metadata: Optional[Any] = None
audit_config: Optional[dict] = None
ignore_unused_services: bool = False
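
The checks changed below all consume this new field through the same guard. A hedged sketch of that pattern (the helper name and arguments are illustrative, not Prowler code; each real check reads the flag through its service client's audit_info):

def should_report(resource_in_use: bool, ignore_unused_services: bool) -> bool:
    # With the flag off every resource is reported; with it on, only
    # resources detected as in use produce findings.
    return resource_in_use or not ignore_unused_services

assert should_report(False, False) is True   # flag off: unused resource still reported
assert should_report(False, True) is False   # flag on: unused resource skipped
assert should_report(True, True) is True     # flag on: used resource still reported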

View File

@@ -10,30 +10,27 @@ class accessanalyzer_enabled(Check):
for analyzer in accessanalyzer_client.analyzers:
report = Check_Report_AWS(self.metadata())
report.region = analyzer.region
report.resource_id = analyzer.name
report.resource_arn = analyzer.arn
report.resource_tags = analyzer.tags
if analyzer.status == "ACTIVE":
report.status = "PASS"
report.status_extended = (
f"IAM Access Analyzer {analyzer.name} is enabled."
)
report.resource_id = analyzer.name
report.resource_arn = analyzer.arn
report.resource_tags = analyzer.tags
elif analyzer.status == "NOT_AVAILABLE":
report.status = "FAIL"
report.status_extended = (
f"IAM Access Analyzer in account {analyzer.name} is not enabled."
)
report.resource_id = analyzer.name
report.resource_arn = analyzer.arn
else:
report.status = "FAIL"
report.status_extended = (
f"IAM Access Analyzer {analyzer.name} is not active."
)
report.resource_id = analyzer.name
report.resource_arn = analyzer.arn
report.resource_tags = analyzer.tags
findings.append(report)
return findings

View File

@@ -28,21 +28,6 @@ class accessanalyzer_enabled_without_findings(Check):
report.resource_id = analyzer.name
report.resource_arn = analyzer.arn
report.resource_tags = analyzer.tags
elif analyzer.status == "NOT_AVAILABLE":
report.status = "FAIL"
report.status_extended = (
f"IAM Access Analyzer in account {analyzer.name} is not enabled."
)
report.resource_id = analyzer.name
report.resource_arn = analyzer.arn
else:
report.status = "FAIL"
report.status_extended = (
f"IAM Access Analyzer {analyzer.name} is not active."
)
report.resource_id = analyzer.name
report.resource_arn = analyzer.arn
report.resource_tags = analyzer.tags
findings.append(report)
findings.append(report)
return findings

View File

@@ -15,6 +15,7 @@ class Athena(AWSService):
self.workgroups = {}
self.__threading_call__(self.__list_workgroups__)
self.__get_workgroups__()
self.__list_query_executions__()
self.__list_tags_for_resource__()
def __list_workgroups__(self, regional_client):
@@ -22,17 +23,22 @@ class Athena(AWSService):
try:
list_workgroups = regional_client.list_work_groups()
for workgroup in list_workgroups["WorkGroups"]:
workgroup_name = workgroup["Name"]
workgroup_arn = f"arn:{self.audited_partition}:athena:{regional_client.region}:{self.audited_account}:workgroup/{workgroup_name}"
if not self.audit_resources or (
is_resource_filtered(workgroup_arn, self.audit_resources)
):
self.workgroups[workgroup_arn] = WorkGroup(
arn=workgroup_arn,
name=workgroup_name,
region=regional_client.region,
try:
workgroup_name = workgroup["Name"]
workgroup_arn = f"arn:{self.audited_partition}:athena:{regional_client.region}:{self.audited_account}:workgroup/{workgroup_name}"
if not self.audit_resources or (
is_resource_filtered(workgroup_arn, self.audit_resources)
):
self.workgroups[workgroup_arn] = WorkGroup(
arn=workgroup_arn,
name=workgroup_name,
state=workgroup["State"],
region=regional_client.region,
)
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -42,38 +48,64 @@ class Athena(AWSService):
logger.info("Athena - Getting WorkGroups...")
try:
for workgroup in self.workgroups.values():
wg = self.regional_clients[workgroup.region].get_work_group(
WorkGroup=workgroup.name
)
wg_configuration = wg.get("WorkGroup").get("Configuration")
self.workgroups[
workgroup.arn
].enforce_workgroup_configuration = wg_configuration.get(
"EnforceWorkGroupConfiguration", False
)
# We include an empty EncryptionConfiguration to handle if the workgroup does not have encryption configured
encryption = (
wg_configuration.get(
"ResultConfiguration",
{"EncryptionConfiguration": {}},
try:
wg = self.regional_clients[workgroup.region].get_work_group(
WorkGroup=workgroup.name
)
.get(
"EncryptionConfiguration",
{"EncryptionOption": ""},
)
.get("EncryptionOption")
)
if encryption in ["SSE_S3", "SSE_KMS", "CSE_KMS"]:
encryption_configuration = EncryptionConfiguration(
encryption_option=encryption, encrypted=True
)
wg_configuration = wg.get("WorkGroup").get("Configuration")
self.workgroups[
workgroup.arn
].encryption_configuration = encryption_configuration
].enforce_workgroup_configuration = wg_configuration.get(
"EnforceWorkGroupConfiguration", False
)
# We include an empty EncryptionConfiguration to handle if the workgroup does not have encryption configured
encryption = (
wg_configuration.get(
"ResultConfiguration",
{"EncryptionConfiguration": {}},
)
.get(
"EncryptionConfiguration",
{"EncryptionOption": ""},
)
.get("EncryptionOption")
)
if encryption in ["SSE_S3", "SSE_KMS", "CSE_KMS"]:
encryption_configuration = EncryptionConfiguration(
encryption_option=encryption, encrypted=True
)
self.workgroups[
workgroup.arn
].encryption_configuration = encryption_configuration
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
def __list_query_executions__(self):
logger.info("Athena - Listing Queries...")
try:
for workgroup in self.workgroups.values():
try:
queries = (
self.regional_clients[workgroup.region]
.list_query_executions(WorkGroup=workgroup.name)
.get("QueryExecutionIds", [])
)
if queries:
workgroup.queries = True
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -83,10 +115,15 @@ class Athena(AWSService):
logger.info("Athena - Listing Tags...")
try:
for workgroup in self.workgroups.values():
regional_client = self.regional_clients[workgroup.region]
workgroup.tags = regional_client.list_tags_for_resource(
ResourceARN=workgroup.arn
)["Tags"]
try:
regional_client = self.regional_clients[workgroup.region]
workgroup.tags = regional_client.list_tags_for_resource(
ResourceARN=workgroup.arn
)["Tags"]
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -101,9 +138,11 @@ class EncryptionConfiguration(BaseModel):
class WorkGroup(BaseModel):
arn: str
name: str
state: str
encryption_configuration: EncryptionConfiguration = EncryptionConfiguration(
encryption_option="", encrypted=False
)
enforce_workgroup_configuration: bool = False
queries: bool = False
region: str
tags: Optional[list] = []
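
The new queries field is what marks an Athena workgroup as used. A minimal sketch of the same idea against the boto3 Athena API (a standalone client call for illustration; the service above reuses its per-region clients and wraps the call in error handling):

import boto3

def workgroup_has_queries(region: str, workgroup_name: str) -> bool:
    # A workgroup counts as "used" if it has at least one query execution.
    client = boto3.client("athena", region_name=region)
    response = client.list_query_executions(WorkGroup=workgroup_name)
    return bool(response.get("QueryExecutionIds", []))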

View File

@@ -8,7 +8,7 @@
"ServiceName": "athena",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:athena:region:account-id:workgroup/resource-id",
"Severity": "high",
"Severity": "medium",
"ResourceType": "WorkGroup",
"Description": "Ensure that encryption at rest is enabled for Amazon Athena query results stored in Amazon S3 in order to secure data and meet compliance requirements for data-at-rest encryption.",
"Risk": "If not enabled sensitive information at rest is not protected.",

View File

@@ -9,19 +9,23 @@ class athena_workgroup_encryption(Check):
"""Execute the athena_workgroup_encryption check"""
findings = []
for workgroup in athena_client.workgroups.values():
report = Check_Report_AWS(self.metadata())
report.region = workgroup.region
report.resource_id = workgroup.name
report.resource_arn = workgroup.arn
report.resource_tags = workgroup.tags
# Only check for enabled and used workgroups (has recent queries)
if (
workgroup.state == "ENABLED" and workgroup.queries
) or not athena_client.audit_info.ignore_unused_services:
report = Check_Report_AWS(self.metadata())
report.region = workgroup.region
report.resource_id = workgroup.name
report.resource_arn = workgroup.arn
report.resource_tags = workgroup.tags
if workgroup.encryption_configuration.encrypted:
report.status = "PASS"
report.status_extended = f"Athena WorkGroup {workgroup.name} encrypts the query results using {workgroup.encryption_configuration.encryption_option}."
else:
report.status = "FAIL"
report.status_extended = f"Athena WorkGroup {workgroup.name} does not encrypt the query results."
if workgroup.encryption_configuration.encrypted:
report.status = "PASS"
report.status_extended = f"Athena WorkGroup {workgroup.name} encrypts the query results using {workgroup.encryption_configuration.encryption_option}."
else:
report.status = "FAIL"
report.status_extended = f"Athena WorkGroup {workgroup.name} does not encrypt the query results."
findings.append(report)
findings.append(report)
return findings

View File

@@ -9,19 +9,23 @@ class athena_workgroup_enforce_configuration(Check):
"""Execute the athena_workgroup_enforce_configuration check"""
findings = []
for workgroup in athena_client.workgroups.values():
report = Check_Report_AWS(self.metadata())
report.region = workgroup.region
report.resource_id = workgroup.name
report.resource_arn = workgroup.arn
report.resource_tags = workgroup.tags
# Only check for enabled and used workgroups (has recent queries)
if (
workgroup.state == "ENABLED" and workgroup.queries
) or not athena_client.audit_info.ignore_unused_services:
report = Check_Report_AWS(self.metadata())
report.region = workgroup.region
report.resource_id = workgroup.name
report.resource_arn = workgroup.arn
report.resource_tags = workgroup.tags
if workgroup.enforce_workgroup_configuration:
report.status = "PASS"
report.status_extended = f"Athena WorkGroup {workgroup.name} enforces the workgroup configuration, so it cannot be overridden by the client-side settings."
else:
report.status = "FAIL"
report.status_extended = f"Athena WorkGroup {workgroup.name} does not enforce the workgroup configuration, so it can be overridden by the client-side settings."
if workgroup.enforce_workgroup_configuration:
report.status = "PASS"
report.status_extended = f"Athena WorkGroup {workgroup.name} enforces the workgroup configuration, so it cannot be overridden by the client-side settings."
else:
report.status = "FAIL"
report.status_extended = f"Athena WorkGroup {workgroup.name} does not enforce the workgroup configuration, so it can be overridden by the client-side settings."
findings.append(report)
findings.append(report)
return findings

View File

@@ -5,18 +5,20 @@ from prowler.providers.aws.services.backup.backup_client import backup_client
class backup_plans_exist(Check):
def execute(self):
findings = []
report = Check_Report_AWS(self.metadata())
report.status = "FAIL"
report.status_extended = "No Backup Plan exist."
report.resource_arn = backup_client.audited_account_arn
report.resource_id = backup_client.audited_account
report.region = backup_client.region
if backup_client.backup_plans:
report = Check_Report_AWS(self.metadata())
report.status = "PASS"
report.status_extended = f"At least one backup plan exists: {backup_client.backup_plans[0].name}."
report.status_extended = f"At least one Backup Plan exists: {backup_client.backup_plans[0].name}."
report.resource_arn = backup_client.backup_plans[0].arn
report.resource_id = backup_client.backup_plans[0].name
report.region = backup_client.backup_plans[0].region
findings.append(report)
findings.append(report)
elif backup_client.backup_vaults:
report = Check_Report_AWS(self.metadata())
report.status = "FAIL"
report.status_extended = "No Backup Plan exist."
report.resource_arn = backup_client.audited_account_arn
report.resource_id = backup_client.audited_account
report.region = backup_client.region
findings.append(report)
return findings

View File

@@ -5,7 +5,7 @@ from prowler.providers.aws.services.backup.backup_client import backup_client
class backup_reportplans_exist(Check):
def execute(self):
findings = []
# We only check report plans if backup plans exist, reducing noise
# We only check report plans if backup plans exist
if backup_client.backup_plans:
report = Check_Report_AWS(self.metadata())
report.status = "FAIL"

View File

@@ -2,17 +2,12 @@ from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
cloudtrail_client,
)
from prowler.providers.aws.services.s3.s3_client import s3_client
class cloudtrail_s3_dataevents_read_enabled(Check):
def execute(self):
findings = []
report = Check_Report_AWS(self.metadata())
report.region = cloudtrail_client.region
report.resource_arn = cloudtrail_client.audited_account_arn
report.resource_id = cloudtrail_client.audited_account
report.status = "FAIL"
report.status_extended = "No CloudTrail trails have a data event to record all S3 object-level API operations."
for trail in cloudtrail_client.trails:
for data_event in trail.data_events:
# classic event selectors
@@ -31,12 +26,14 @@ class cloudtrail_s3_dataevents_read_enabled(Check):
or f"arn:{cloudtrail_client.audited_partition}:s3:::*/*"
in resource["Values"]
):
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags
report.status = "PASS"
report.status_extended = f"Trail {trail.name} from home region {trail.home_region} has a classic data event selector to record all S3 object-level API operations."
findings.append(report)
# advanced event selectors
elif data_event.is_advanced:
for field_selector in data_event.event_selector["FieldSelectors"]:
@@ -44,12 +41,22 @@ class cloudtrail_s3_dataevents_read_enabled(Check):
field_selector["Field"] == "resources.type"
and field_selector["Equals"][0] == "AWS::S3::Object"
):
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags
report.status = "PASS"
report.status_extended = f"Trail {trail.name} from home region {trail.home_region} has an advanced data event selector to record all S3 object-level API operations."
findings.append(report)
findings.append(report)
if not findings and (
s3_client.buckets or not cloudtrail_client.audit_info.ignore_unused_services
):
report = Check_Report_AWS(self.metadata())
report.region = cloudtrail_client.region
report.resource_arn = cloudtrail_client.audited_account_arn
report.resource_id = cloudtrail_client.audited_account
report.status = "FAIL"
report.status_extended = "No CloudTrail trails have a data event to record all S3 object-level API operations."
findings.append(report)
return findings
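
The account-level FAIL is now conditional rather than unconditional. A small sketch of the fallback condition introduced here (function name is illustrative):

def needs_account_level_fail(findings, buckets, ignore_unused_services) -> bool:
    # Only emit the account-wide FAIL when no trail produced a finding and
    # either the account actually has S3 buckets or the flag is not set.
    return not findings and (bool(buckets) or not ignore_unused_services)

assert needs_account_level_fail([], [], True) is False           # no buckets, flag on: skipped
assert needs_account_level_fail([], ["logs-bucket"], True) is True
assert needs_account_level_fail(["pass"], ["logs-bucket"], True) is False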

View File

@@ -2,17 +2,12 @@ from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
cloudtrail_client,
)
from prowler.providers.aws.services.s3.s3_client import s3_client
class cloudtrail_s3_dataevents_write_enabled(Check):
def execute(self):
findings = []
report = Check_Report_AWS(self.metadata())
report.region = cloudtrail_client.region
report.resource_arn = cloudtrail_client.audited_account_arn
report.resource_id = cloudtrail_client.audited_account
report.status = "FAIL"
report.status_extended = "No CloudTrail trails have a data event to record all S3 object-level API operations."
for trail in cloudtrail_client.trails:
for data_event in trail.data_events:
# Classic event selectors
@@ -31,12 +26,14 @@ class cloudtrail_s3_dataevents_write_enabled(Check):
or f"arn:{cloudtrail_client.audited_partition}:s3:::*/*"
in resource["Values"]
):
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags
report.status = "PASS"
report.status_extended = f"Trail {trail.name} from home region {trail.home_region} has a classic data event selector to record all S3 object-level API operations."
findings.append(report)
# Advanced event selectors
elif data_event.is_advanced:
for field_selector in data_event.event_selector["FieldSelectors"]:
@@ -44,11 +41,22 @@ class cloudtrail_s3_dataevents_write_enabled(Check):
field_selector["Field"] == "resources.type"
and field_selector["Equals"][0] == "AWS::S3::Object"
):
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags
report.status = "PASS"
report.status_extended = f"Trail {trail.name} from home region {trail.home_region} has an advanced data event selector to record all S3 object-level API operations."
findings.append(report)
findings.append(report)
if not findings and (
s3_client.buckets or not cloudtrail_client.audit_info.ignore_unused_services
):
report = Check_Report_AWS(self.metadata())
report.region = cloudtrail_client.region
report.resource_arn = cloudtrail_client.audited_account_arn
report.resource_id = cloudtrail_client.audited_account
report.status = "FAIL"
report.status_extended = "No CloudTrail trails have a data event to record all S3 object-level API operations."
findings.append(report)
return findings

View File

@@ -10,12 +10,16 @@ class ec2_ebs_default_encryption(Check):
report.region = ebs_encryption.region
report.resource_arn = ec2_client.audited_account_arn
report.resource_id = ec2_client.audited_account
report.status = "FAIL"
report.status_extended = "EBS Default Encryption is not activated."
if ebs_encryption.status:
report.status = "PASS"
report.status_extended = "EBS Default Encryption is activated."
findings.append(report)
findings.append(report)
elif (
not ec2_client.audit_info.ignore_unused_services
or ebs_encryption.volumes
):
report.status = "FAIL"
report.status_extended = "EBS Default Encryption is not activated."
findings.append(report)
return findings

View File

@@ -9,17 +9,28 @@ class ec2_networkacl_allow_ingress_any_port(Check):
tcp_protocol = "-1"
check_port = 0
for network_acl in ec2_client.network_acls:
report = Check_Report_AWS(self.metadata())
report.region = network_acl.region
report.resource_id = network_acl.id
report.resource_arn = network_acl.arn
report.resource_tags = network_acl.tags
report.status = "PASS"
report.status_extended = f"Network ACL {network_acl.name if network_acl.name else network_acl.id} does not have every port open to the Internet."
# If some entry allows it, that ACL is not securely configured
if check_network_acl(network_acl.entries, tcp_protocol, check_port):
report.status = "FAIL"
report.status_extended = f"Network ACL {network_acl.name if network_acl.name else network_acl.id} has every port open to the Internet."
findings.append(report)
if (
not ec2_client.audit_info.ignore_unused_services
or network_acl.region in ec2_client.regions_with_sgs
):
# If some entry allows it, that ACL is not securely configured
if check_network_acl(network_acl.entries, tcp_protocol, check_port):
report = Check_Report_AWS(self.metadata())
report.resource_id = network_acl.id
report.region = network_acl.region
report.resource_arn = network_acl.arn
report.resource_tags = network_acl.tags
report.status = "FAIL"
report.status_extended = f"Network ACL {network_acl.name if network_acl.name else network_acl.id} has every port open to the Internet."
findings.append(report)
else:
report = Check_Report_AWS(self.metadata())
report.resource_id = network_acl.id
report.region = network_acl.region
report.resource_arn = network_acl.arn
report.resource_tags = network_acl.tags
report.status = "PASS"
report.status_extended = f"Network ACL {network_acl.name if network_acl.name else network_acl.id} does not have every port open to the Internet."
findings.append(report)
return findings
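
The network ACL checks gate on ec2_client.regions_with_sgs rather than per-resource usage. A hedged sketch of that guard (simplified; regions_with_sgs is assumed to be the set of regions where security groups are in use):

def nacl_in_scope(nacl_region: str, regions_with_sgs: set, ignore_unused_services: bool) -> bool:
    # With the flag on, only evaluate NACLs in regions that show real usage.
    return not ignore_unused_services or nacl_region in regions_with_sgs

assert nacl_in_scope("eu-west-1", {"us-east-1"}, True) is False
assert nacl_in_scope("eu-west-1", {"us-east-1"}, False) is True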

View File

@@ -9,17 +9,28 @@ class ec2_networkacl_allow_ingress_tcp_port_22(Check):
tcp_protocol = "6"
check_port = 22
for network_acl in ec2_client.network_acls:
report = Check_Report_AWS(self.metadata())
report.region = network_acl.region
report.resource_arn = network_acl.arn
report.resource_tags = network_acl.tags
report.status = "PASS"
report.status_extended = f"Network ACL {network_acl.name if network_acl.name else network_acl.id} does not have SSH port 22 open to the Internet."
report.resource_id = network_acl.id
# If some entry allows it, that ACL is not securely configured
if check_network_acl(network_acl.entries, tcp_protocol, check_port):
report.status = "FAIL"
report.status_extended = f"Network ACL {network_acl.name if network_acl.name else network_acl.id} has SSH port 22 open to the Internet."
findings.append(report)
if (
not ec2_client.audit_info.ignore_unused_services
or network_acl.region in ec2_client.regions_with_sgs
):
# If some entry allows it, that ACL is not securely configured
if check_network_acl(network_acl.entries, tcp_protocol, check_port):
report = Check_Report_AWS(self.metadata())
report.resource_id = network_acl.id
report.region = network_acl.region
report.resource_arn = network_acl.arn
report.resource_tags = network_acl.tags
report.status = "FAIL"
report.status_extended = f"Network ACL {network_acl.name if network_acl.name else network_acl.id} has SSH port 22 open to the Internet."
findings.append(report)
else:
report = Check_Report_AWS(self.metadata())
report.resource_id = network_acl.id
report.region = network_acl.region
report.resource_arn = network_acl.arn
report.resource_tags = network_acl.tags
report.status = "PASS"
report.status_extended = f"Network ACL {network_acl.name if network_acl.name else network_acl.id} does not have SSH port 22 open to the Internet."
findings.append(report)
return findings

View File

@@ -9,17 +9,28 @@ class ec2_networkacl_allow_ingress_tcp_port_3389(Check):
tcp_protocol = "6"
check_port = 3389
for network_acl in ec2_client.network_acls:
report = Check_Report_AWS(self.metadata())
report.region = network_acl.region
report.resource_arn = network_acl.arn
report.resource_tags = network_acl.tags
report.status = "PASS"
report.status_extended = f"Network ACL {network_acl.name if network_acl.name else network_acl.id} does not have Microsoft RDP port 3389 open to the Internet."
report.resource_id = network_acl.id
# If some entry allows it, that ACL is not securely configured
if check_network_acl(network_acl.entries, tcp_protocol, check_port):
report.status = "FAIL"
report.status_extended = f"Network ACL {network_acl.name if network_acl.name else network_acl.id} has Microsoft RDP port 3389 open to the Internet."
findings.append(report)
if (
not ec2_client.audit_info.ignore_unused_services
or network_acl.region in ec2_client.regions_with_sgs
):
# If some entry allows it, that ACL is not securely configured
if check_network_acl(network_acl.entries, tcp_protocol, check_port):
report = Check_Report_AWS(self.metadata())
report.resource_id = network_acl.id
report.region = network_acl.region
report.resource_arn = network_acl.arn
report.resource_tags = network_acl.tags
report.status = "FAIL"
report.status_extended = f"Network ACL {network_acl.name if network_acl.name else network_acl.id} has Microsoft RDP port 3389 open to the Internet."
findings.append(report)
else:
report = Check_Report_AWS(self.metadata())
report.resource_id = network_acl.id
report.region = network_acl.region
report.resource_arn = network_acl.arn
report.resource_tags = network_acl.tags
report.status = "PASS"
report.status_extended = f"Network ACL {network_acl.name if network_acl.name else network_acl.id} does not have Microsoft RDP port 3389 open to the Internet."
findings.append(report)
return findings

View File

@@ -1,22 +1,29 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_any_port(Check):
def execute(self):
findings = []
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have all ports open to the Internet."
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
if security_group.public_ports:
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has all ports open to the Internet."
findings.append(report)
# Check if ignoring flag is set and if the VPC and the SG is in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have all ports open to the Internet."
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
if security_group.public_ports:
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has all ports open to the Internet."
findings.append(report)
return findings
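
All of the security-group checks that follow share the same in-use test. A hedged sketch of it (attribute names taken from the diff, object shapes simplified):

def security_group_in_use(security_group, vpcs) -> bool:
    # The SG's VPC must be known and in use, and the SG must be attached
    # to at least one network interface.
    vpc = vpcs.get(security_group.vpc_id)
    return vpc is not None and vpc.in_use and len(security_group.network_interfaces) > 0

With --ignore-unused-services set, a security group failing this test is simply skipped instead of producing a PASS or FAIL finding.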

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018(Check):
@@ -8,23 +9,29 @@ class ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018(
findings = []
check_ports = [27017, 27018]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have MongoDB ports 27017 and 27018 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has MongoDB ports 27017 and 27018 open to the Internet."
break
findings.append(report)
# Check if ignoring flag is set and if the VPC and the SG is in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have MongoDB ports 27017 and 27018 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has MongoDB ports 27017 and 27018 open to the Internet."
break
findings.append(report)
return findings

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21(Check):
@@ -8,23 +9,29 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21(Check)
findings = []
check_ports = [20, 21]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have FTP ports 20 and 21 open to the Internet."
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has FTP ports 20 and 21 open to the Internet."
break
findings.append(report)
# Check if ignoring flag is set and if the VPC and the SG is in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have FTP ports 20 and 21 open to the Internet."
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has FTP ports 20 and 21 open to the Internet."
break
findings.append(report)
return findings

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22(Check):
@@ -8,23 +9,29 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22(Check):
findings = []
check_ports = [22]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have SSH port 22 open to the Internet."
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has SSH port 22 open to the Internet."
break
findings.append(report)
# Check if ignoring flag is set and if the VPC and the SG is in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have SSH port 22 open to the Internet."
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has SSH port 22 open to the Internet."
break
findings.append(report)
return findings

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389(Check):
@@ -8,23 +9,29 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389(Check):
findings = []
check_ports = [3389]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Microsoft RDP port 3389 open to the Internet."
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Microsoft RDP port 3389 open to the Internet."
break
findings.append(report)
# Check if ignoring flag is set and if the VPC and the SG is in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Microsoft RDP port 3389 open to the Internet."
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Microsoft RDP port 3389 open to the Internet."
break
findings.append(report)
return findings

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888(
@@ -10,23 +11,29 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9
findings = []
check_ports = [7199, 9160, 8888]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Casandra ports 7199, 8888 and 9160 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Casandra ports 7199, 8888 and 9160 open to the Internet."
break
findings.append(report)
# Check if ignoring flag is set and if the VPC and the SG is in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Casandra ports 7199, 8888 and 9160 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Casandra ports 7199, 8888 and 9160 open to the Internet."
break
findings.append(report)
return findings

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601(
@@ -10,23 +11,29 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_ki
findings = []
check_ports = [9200, 9300, 5601]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Elasticsearch/Kibana ports 9200, 9300 and 5601 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Elasticsearch/Kibana ports 9200, 9300 and 5601 open to the Internet."
break
findings.append(report)
# Check if ignoring flag is set and if the VPC and the SG is in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Elasticsearch/Kibana ports 9200, 9300 and 5601 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Elasticsearch/Kibana ports 9200, 9300 and 5601 open to the Internet."
break
findings.append(report)
return findings

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092(Check):
@@ -8,23 +9,29 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092(Check
findings = []
check_ports = [9092]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Kafka port 9092 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Kafka port 9092 open to the Internet."
break
findings.append(report)
# Check if ignoring flag is set and if the VPC and the SG is in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Kafka port 9092 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Kafka port 9092 open to the Internet."
break
findings.append(report)
return findings

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211(Check):
@@ -8,23 +9,29 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211(
findings = []
check_ports = [11211]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Memcached port 11211 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Memcached port 11211 open to the Internet."
break
findings.append(report)
# Check if ignoring flag is set and if the VPC and the SG is in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Memcached port 11211 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Memcached port 11211 open to the Internet."
break
findings.append(report)
return findings

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306(Check):
@@ -8,25 +9,31 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306(Check
findings = []
check_ports = [3306]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have MySQL port 3306 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has MySQL port 3306 open to the Internet."
report.resource_details = security_group.name
report.resource_id = security_group.id
break
findings.append(report)
# Check if ignoring flag is set and if the VPC and the SG is in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have MySQL port 3306 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has MySQL port 3306 open to the Internet."
report.resource_details = security_group.name
report.resource_id = security_group.id
break
findings.append(report)
return findings

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483(Check):
@@ -8,23 +9,29 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483
findings = []
check_ports = [1521, 2483]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Oracle ports 1521 and 2483 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Oracle ports 1521 and 2483 open to the Internet."
break
findings.append(report)
# Check if ignoring flag is set and if the VPC and the SG is in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Oracle ports 1521 and 2483 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Oracle ports 1521 and 2483 open to the Internet."
break
findings.append(report)
return findings

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432(Check):
@@ -8,23 +9,29 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432(Ch
findings = []
check_ports = [5432]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Postgres port 5432 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Postgres port 5432 open to the Internet."
break
findings.append(report)
# Check if ignoring flag is set and if the VPC and the SG is in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Postgres port 5432 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Postgres port 5432 open to the Internet."
break
findings.append(report)
return findings

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379(Check):
@@ -8,23 +9,29 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379(Check
findings = []
check_ports = [6379]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Redis port 6379 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Redis port 6379 open to the Internet."
break
findings.append(report)
# Check if ignoring flag is set and if the VPC and the SG is in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Redis port 6379 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Redis port 6379 open to the Internet."
break
findings.append(report)
return findings

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434(
@@ -10,23 +11,29 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_
findings = []
check_ports = [1433, 1434]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Microsoft SQL Server ports 1433 and 1434 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Microsoft SQL Server ports 1433 and 1434 open to the Internet."
break
findings.append(report)
# Check if the ignore flag is set and if the VPC and the SG are in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Microsoft SQL Server ports 1433 and 1434 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Microsoft SQL Server ports 1433 and 1434 open to the Internet."
break
findings.append(report)
return findings

View File

@@ -1,6 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23(Check):
@@ -8,23 +9,29 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23(Check)
findings = []
check_ports = [23]
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Telnet port 23 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Telnet port 23 open to the Internet."
break
findings.append(report)
# Check if the ignore flag is set and if the VPC and the SG are in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) does not have Telnet port 23 open to the Internet."
if not security_group.public_ports:
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
if check_security_group(
ingress_rule, "tcp", check_ports, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has Telnet port 23 open to the Internet."
break
findings.append(report)
return findings

View File

@@ -2,6 +2,7 @@ import ipaddress
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_allow_wide_open_public_ipv4(Check):
@@ -9,42 +10,48 @@ class ec2_securitygroup_allow_wide_open_public_ipv4(Check):
findings = []
cidr_threshold = 24
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has no potential wide-open non-RFC1918 address."
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
for ipv4 in ingress_rule["IpRanges"]:
ip = ipaddress.ip_network(ipv4["CidrIp"])
# Check if IP is public according to RFC1918 and if 0 < prefixlen < 24
if (
ip.is_global
and ip.prefixlen < cidr_threshold
and ip.prefixlen > 0
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has potential wide-open non-RFC1918 address {ipv4['CidrIp']} in ingress rule."
break
# Check if the ignore flag is set and if the VPC and the SG are in use
if not ec2_client.audit_info.ignore_unused_services or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has no potential wide-open non-RFC1918 address."
# Loop through every security group's ingress rule and check it
for ingress_rule in security_group.ingress_rules:
for ipv4 in ingress_rule["IpRanges"]:
ip = ipaddress.ip_network(ipv4["CidrIp"])
# Check if IP is public according to RFC1918 and if 0 < prefixlen < 24
if (
ip.is_global
and ip.prefixlen < cidr_threshold
and ip.prefixlen > 0
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has potential wide-open non-RFC1918 address {ipv4['CidrIp']} in ingress rule."
break
# Loop through every security group's egress rule and check it
for egress_rule in security_group.egress_rules:
for ipv4 in egress_rule["IpRanges"]:
ip = ipaddress.ip_network(ipv4["CidrIp"])
# Check if IP is public according to RFC1918 and if 0 < prefixlen < 24
if (
ip.is_global
and ip.prefixlen < cidr_threshold
and ip.prefixlen > 0
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has potential wide-open non-RFC1918 address {ipv4['CidrIp']} in egress rule."
break
# Loop through every security group's egress rule and check it
for egress_rule in security_group.egress_rules:
for ipv4 in egress_rule["IpRanges"]:
ip = ipaddress.ip_network(ipv4["CidrIp"])
# Check if IP is public according to RFC1918 and if 0 < prefixlen < 24
if (
ip.is_global
and ip.prefixlen < cidr_threshold
and ip.prefixlen > 0
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has potential wide-open non-RFC1918 address {ipv4['CidrIp']} in egress rule."
break
findings.append(report)
findings.append(report)
return findings
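
The same three-part usage test (the security group's VPC is known, that VPC is in use, and the group is attached to at least one network interface) is repeated verbatim across the security group checks above. A small helper such as the following could centralize it; this is an illustrative sketch, not part of the commit:

from types import SimpleNamespace

def security_group_in_use(security_group, vpcs):
    # Mirrors the gating condition added to each check above.
    return (
        security_group.vpc_id in vpcs
        and vpcs[security_group.vpc_id].in_use
        and len(security_group.network_interfaces) > 0
    )

# Example with stand-in objects
sg = SimpleNamespace(vpc_id="vpc-123", network_interfaces=["eni-1"])
vpcs = {"vpc-123": SimpleNamespace(in_use=True)}
print(security_group_in_use(sg, vpcs))  # True

A check would then gate on: not ec2_client.audit_info.ignore_unused_services or security_group_in_use(security_group, vpc_client.vpcs).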

View File

@@ -19,6 +19,7 @@ class EC2(AWSService):
self.__threading_call__(self.__describe_instances__)
self.__get_instance_user_data__()
self.security_groups = []
self.regions_with_sgs = []
self.__threading_call__(self.__describe_security_groups__)
self.network_acls = []
self.__threading_call__(self.__describe_network_acls__)
@@ -116,7 +117,7 @@ class EC2(AWSService):
if not self.audit_resources or (
is_resource_filtered(arn, self.audit_resources)
):
# check if sg has public access to all ports to reduce noise
# check if sg has public access to all ports
all_public_ports = False
for ingress_rule in sg["IpPermissions"]:
if (
@@ -137,9 +138,12 @@ class EC2(AWSService):
ingress_rules=sg["IpPermissions"],
egress_rules=sg["IpPermissionsEgress"],
public_ports=all_public_ports,
vpc_id=sg["VpcId"],
tags=sg.get("Tags"),
)
)
if sg["GroupName"] != "default":
self.regions_with_sgs.append(regional_client.region)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -396,11 +400,16 @@ class EC2(AWSService):
def __get_ebs_encryption_by_default__(self, regional_client):
logger.info("EC2 - Get EBS Encryption By Default...")
try:
volumes_in_region = False
for volume in self.volumes:
if volume.region == regional_client.region:
volumes_in_region = True
self.ebs_encryption_by_default.append(
EbsEncryptionByDefault(
status=regional_client.get_ebs_encryption_by_default()[
"EbsEncryptionByDefault"
],
volumes=volumes_in_region,
region=regional_client.region,
)
)
@@ -453,6 +462,7 @@ class SecurityGroup(BaseModel):
arn: str
region: str
id: str
vpc_id: str
public_ports: bool
network_interfaces: list[str] = []
ingress_rules: list[dict]
@@ -499,4 +509,5 @@ class Image(BaseModel):
class EbsEncryptionByDefault(BaseModel):
status: bool
volumes: bool
region: str
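
The per-region scans introduced here (volumes_in_region above, tables_in_region in the Glue service, and the Lambda/EC2 scans in the Inspector2 check) all answer the same question, "is there at least one resource in this region", which any() expresses compactly. A standalone sketch, using stand-in objects that only carry the region attribute the real models expose:

from types import SimpleNamespace

volumes = [SimpleNamespace(region="eu-west-1"), SimpleNamespace(region="us-east-1")]
current_region = "us-east-1"

# Equivalent to the volumes_in_region loop added above.
volumes_in_region = any(volume.region == current_region for volume in volumes)
print(volumes_in_region)  # True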

View File

@@ -6,16 +6,18 @@ class glue_data_catalogs_connection_passwords_encryption_enabled(Check):
def execute(self):
findings = []
for encryption in glue_client.catalog_encryption_settings:
report = Check_Report_AWS(self.metadata())
report.resource_id = glue_client.audited_account
report.resource_arn = glue_client.audited_account_arn
report.region = encryption.region
report.status = "FAIL"
report.status_extended = (
"Glue data catalog connection password is not encrypted."
)
if encryption.password_encryption:
report.status = "PASS"
report.status_extended = f"Glue data catalog connection password is encrypted with KMS key {encryption.password_kms_id}."
findings.append(report)
# Check only if there are Glue Tables
if encryption.tables or not glue_client.audit_info.ignore_unused_services:
report = Check_Report_AWS(self.metadata())
report.resource_id = glue_client.audited_account
report.resource_arn = glue_client.audited_account_arn
report.region = encryption.region
report.status = "FAIL"
report.status_extended = (
"Glue data catalog connection password is not encrypted."
)
if encryption.password_encryption:
report.status = "PASS"
report.status_extended = f"Glue data catalog connection password is encrypted with KMS key {encryption.password_kms_id}."
findings.append(report)
return findings

View File

@@ -6,16 +6,18 @@ class glue_data_catalogs_metadata_encryption_enabled(Check):
def execute(self):
findings = []
for encryption in glue_client.catalog_encryption_settings:
report = Check_Report_AWS(self.metadata())
report.resource_id = glue_client.audited_account
report.resource_arn = glue_client.audited_account_arn
report.region = encryption.region
report.status = "FAIL"
report.status_extended = (
"Glue data catalog settings have metadata encryption disabled."
)
if encryption.mode == "SSE-KMS":
report.status = "PASS"
report.status_extended = f"Glue data catalog settings have metadata encryption enabled with KMS key {encryption.kms_id}."
findings.append(report)
# Check only if there are Glue Tables
if encryption.tables or not glue_client.audit_info.ignore_unused_services:
report = Check_Report_AWS(self.metadata())
report.resource_id = glue_client.audited_account
report.resource_arn = glue_client.audited_account_arn
report.region = encryption.region
report.status = "FAIL"
report.status_extended = (
"Glue data catalog settings have metadata encryption disabled."
)
if encryption.mode == "SSE-KMS":
report.status = "PASS"
report.status_extended = f"Glue data catalog settings have metadata encryption enabled with KMS key {encryption.kms_id}."
findings.append(report)
return findings
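
Both Glue checks now share the same gate: a finding is emitted whenever the data catalog has tables, or whenever --ignore-unused-services was not requested; only the combination "flag set and no tables" suppresses it. A minimal sketch of that predicate:

def should_report(resource_in_use, ignore_unused_services):
    # Sketch of the gate used by the Glue checks above.
    return resource_in_use or not ignore_unused_services

print(should_report(resource_in_use=False, ignore_unused_services=True))   # False (suppressed)
print(should_report(resource_in_use=False, ignore_unused_services=False))  # True
print(should_report(resource_in_use=True, ignore_unused_services=True))    # True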

View File

@@ -166,6 +166,10 @@ class Glue(AWSService):
settings = regional_client.get_data_catalog_encryption_settings()[
"DataCatalogEncryptionSettings"
]
tables_in_region = False
for table in self.tables:
if table.region == regional_client.region:
tables_in_region = True
self.catalog_encryption_settings.append(
CatalogEncryptionSetting(
mode=settings["EncryptionAtRest"]["CatalogEncryptionMode"],
@@ -177,6 +181,7 @@ class Glue(AWSService):
"AwsKmsKeyId"
),
region=regional_client.region,
tables=tables_in_region,
)
)
except Exception as error:
@@ -206,6 +211,7 @@ class CatalogEncryptionSetting(BaseModel):
kms_id: Optional[str]
password_encryption: bool
password_kms_id: Optional[str]
tables: bool
region: str

View File

@@ -1,4 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.awslambda.awslambda_client import awslambda_client
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ecr.ecr_client import ecr_client
from prowler.providers.aws.services.inspector2.inspector2_client import (
inspector2_client,
)
@@ -9,8 +12,6 @@ class inspector2_findings_exist(Check):
findings = []
for inspector in inspector2_client.inspectors:
report = Check_Report_AWS(self.metadata())
report.status = "FAIL"
report.status_extended = "Inspector2 is not enabled."
report.resource_id = inspector2_client.audited_account
report.resource_arn = inspector2_client.audited_account_arn
report.region = inspector.region
@@ -30,7 +31,24 @@ class inspector2_findings_exist(Check):
report.status_extended = (
f"There are {active_findings} ACTIVE Inspector2 findings."
)
findings.append(report)
findings.append(report)
else:
if inspector2_client.audit_info.ignore_unused_services:
functions_in_region = False
ec2_in_region = False
for function in awslambda_client.functions.values():
if function.region == inspector.region:
functions_in_region = True
for instance in ec2_client.instances:
if instance.region == inspector.region:
ec2_in_region = True
if not inspector2_client.audit_info.ignore_unused_services or (
functions_in_region
or ecr_client.registries[inspector.region].repositories
or ec2_in_region
):
report.status = "FAIL"
report.status_extended = "Inspector2 is not enabled."
findings.append(report)
return findings

View File

@@ -1,5 +1,6 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.macie.macie_client import macie_client
from prowler.providers.aws.services.s3.s3_client import s3_client
class macie_is_enabled(Check):
@@ -13,12 +14,20 @@ class macie_is_enabled(Check):
if session.status == "ENABLED":
report.status = "PASS"
report.status_extended = "Macie is enabled."
elif session.status == "PAUSED":
report.status = "FAIL"
report.status_extended = "Macie is currently in a SUSPENDED state."
findings.append(report)
else:
report.status = "FAIL"
report.status_extended = "Macie is not enabled."
findings.append(report)
if (
not macie_client.audit_info.ignore_unused_services
or session.region in s3_client.regions_with_buckets
):
if session.status == "PAUSED":
report.status = "FAIL"
report.status_extended = (
"Macie is currently in a SUSPENDED state."
)
else:
report.status = "FAIL"
report.status_extended = "Macie is not enabled."
findings.append(report)
return findings

View File

@@ -9,19 +9,20 @@ class networkfirewall_in_all_vpc(Check):
def execute(self):
findings = []
for vpc in vpc_client.vpcs.values():
report = Check_Report_AWS(self.metadata())
report.region = vpc.region
report.resource_id = vpc.id
report.resource_arn = vpc.arn
report.resource_tags = vpc.tags
report.status = "FAIL"
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} does not have Network Firewall enabled."
for firewall in networkfirewall_client.network_firewalls:
if firewall.vpc_id == vpc.id:
report.status = "PASS"
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} has Network Firewall enabled."
break
if not vpc_client.audit_info.ignore_unused_services or vpc.in_use:
report = Check_Report_AWS(self.metadata())
report.region = vpc.region
report.resource_id = vpc.id
report.resource_arn = vpc.arn
report.resource_tags = vpc.tags
report.status = "FAIL"
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} does not have Network Firewall enabled."
for firewall in networkfirewall_client.network_firewalls:
if firewall.vpc_id == vpc.id:
report.status = "PASS"
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} has Network Firewall enabled."
break
findings.append(report)
findings.append(report)
return findings

View File

@@ -1,4 +1,5 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.s3.s3_client import s3_client
from prowler.providers.aws.services.s3.s3control_client import s3control_client
@@ -6,11 +7,6 @@ class s3_account_level_public_access_blocks(Check):
def execute(self):
findings = []
report = Check_Report_AWS(self.metadata())
report.status = "FAIL"
report.status_extended = f"Block Public Access is not configured for the account {s3control_client.audited_account}."
report.region = s3control_client.region
report.resource_id = s3control_client.audited_account
report.resource_arn = s3control_client.audited_account_arn
if (
s3control_client.account_public_access_block
and s3control_client.account_public_access_block.ignore_public_acls
@@ -18,7 +14,16 @@ class s3_account_level_public_access_blocks(Check):
):
report.status = "PASS"
report.status_extended = f"Block Public Access is configured for the account {s3control_client.audited_account}."
findings.append(report)
report.region = s3control_client.region
report.resource_id = s3control_client.audited_account
report.resource_arn = s3control_client.audited_account_arn
findings.append(report)
elif s3_client.buckets or not s3_client.audit_info.ignore_unused_services:
report.status = "FAIL"
report.status_extended = f"Block Public Access is not configured for the account {s3control_client.audited_account}."
report.region = s3control_client.region
report.resource_id = s3control_client.audited_account
report.resource_arn = s3control_client.audited_account_arn
findings.append(report)
return findings

View File

@@ -15,7 +15,7 @@ class S3(AWSService):
def __init__(self, audit_info):
# Call AWSService's __init__
super().__init__(__class__.__name__, audit_info)
self.regions_with_buckets = []
self.buckets = self.__list_buckets__(audit_info)
self.__threading_call__(self.__get_bucket_versioning__)
self.__threading_call__(self.__get_bucket_logging__)
@@ -55,6 +55,7 @@ class S3(AWSService):
if not self.audit_resources or (
is_resource_filtered(arn, self.audit_resources)
):
self.regions_with_buckets.append(bucket_region)
# Check if there are filter regions
if audit_info.audited_regions:
if bucket_region in audit_info.audited_regions:

View File

@@ -6,7 +6,7 @@
"ServiceName": "ssm",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:ssm:region:account-id:document/document-name",
"Severity": "medium",
"Severity": "low",
"ResourceType": "Other",
"Description": "Ensure SSM Incidents is enabled with response plans.",
"Risk": "Not having SSM Incidents enabled can increase the risk of delayed detection and response to security incidents, unauthorized access, limited visibility into incidents and vulnerabilities",

View File

@@ -1,7 +1,7 @@
{
"Provider": "aws",
"CheckID": "vpc_different_regions",
"CheckTitle": "Ensure there are vpcs in more than one region",
"CheckTitle": "Ensure there are VPCs in more than one region",
"CheckType": [
"Infrastructure Security"
],
@@ -10,7 +10,7 @@
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"Severity": "medium",
"ResourceType": "AwsEc2Vpc",
"Description": "Ensure there are vpcs in more than one region",
"Description": "Ensure there are VPCs in more than one region",
"Risk": "",
"RelatedUrl": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html",
"Remediation": {
@@ -21,7 +21,7 @@
"Terraform": ""
},
"Recommendation": {
"Text": "Ensure there are vpcs in more than one region",
"Text": "Ensure there are VPCs in more than one region",
"Url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-example-private-subnets-nat.html"
}
},

View File

@@ -6,21 +6,22 @@ class vpc_flow_logs_enabled(Check):
def execute(self):
findings = []
for vpc in vpc_client.vpcs.values():
report = Check_Report_AWS(self.metadata())
report.region = vpc.region
report.resource_tags = vpc.tags
report.resource_id = vpc.id
report.resource_arn = vpc.arn
report.status = "FAIL"
report.status_extended = (
f"VPC {vpc.name if vpc.name else vpc.id} Flow logs are disabled."
)
if vpc.flow_log:
report.status = "PASS"
if not vpc_client.audit_info.ignore_unused_services or vpc.in_use:
report = Check_Report_AWS(self.metadata())
report.region = vpc.region
report.resource_tags = vpc.tags
report.resource_id = vpc.id
report.resource_arn = vpc.arn
report.status = "FAIL"
report.status_extended = (
f"VPC {vpc.name if vpc.name else vpc.id} Flow logs are enabled."
f"VPC {vpc.name if vpc.name else vpc.id} Flow logs are disabled."
)
if vpc.flow_log:
report.status = "PASS"
report.status_extended = (
f"VPC {vpc.name if vpc.name else vpc.id} Flow logs are enabled."
)
findings.append(report)
findings.append(report)
return findings

View File

@@ -25,6 +25,7 @@ class VPC(AWSService):
self.__describe_flow_logs__()
self.__describe_peering_route_tables__()
self.__describe_vpc_endpoint_service_permissions__()
self.__describe_network_interfaces__()
self.vpc_subnets = {}
self.__threading_call__(self.__describe_vpc_subnets__)
@@ -34,22 +35,27 @@ class VPC(AWSService):
describe_vpcs_paginator = regional_client.get_paginator("describe_vpcs")
for page in describe_vpcs_paginator.paginate():
for vpc in page["Vpcs"]:
arn = f"arn:{self.audited_partition}:ec2:{regional_client.region}:{self.audited_account}:vpc/{vpc['VpcId']}"
if not self.audit_resources or (
is_resource_filtered(arn, self.audit_resources)
):
vpc_name = ""
for tag in vpc.get("Tags", []):
if tag["Key"] == "Name":
vpc_name = tag["Value"]
self.vpcs[vpc["VpcId"]] = VPCs(
arn=arn,
id=vpc["VpcId"],
name=vpc_name,
default=vpc["IsDefault"],
cidr_block=vpc["CidrBlock"],
region=regional_client.region,
tags=vpc.get("Tags"),
try:
arn = f"arn:{self.audited_partition}:ec2:{regional_client.region}:{self.audited_account}:vpc/{vpc['VpcId']}"
if not self.audit_resources or (
is_resource_filtered(arn, self.audit_resources)
):
vpc_name = ""
for tag in vpc.get("Tags", []):
if tag["Key"] == "Name":
vpc_name = tag["Value"]
self.vpcs[vpc["VpcId"]] = VPCs(
arn=arn,
id=vpc["VpcId"],
name=vpc_name,
default=vpc["IsDefault"],
cidr_block=vpc["CidrBlock"],
region=regional_client.region,
tags=vpc.get("Tags"),
)
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
@@ -68,21 +74,28 @@ class VPC(AWSService):
if not self.audit_resources or (
is_resource_filtered(arn, self.audit_resources)
):
conn["AccepterVpcInfo"]["CidrBlock"] = None
self.vpc_peering_connections.append(
VpcPeeringConnection(
arn=arn,
id=conn["VpcPeeringConnectionId"],
accepter_vpc=conn["AccepterVpcInfo"]["VpcId"],
accepter_cidr=conn["AccepterVpcInfo"].get("CidrBlock"),
requester_vpc=conn["RequesterVpcInfo"]["VpcId"],
requester_cidr=conn["RequesterVpcInfo"].get(
"CidrBlock"
),
region=regional_client.region,
tags=conn.get("Tags"),
try:
conn["AccepterVpcInfo"]["CidrBlock"] = None
self.vpc_peering_connections.append(
VpcPeeringConnection(
arn=arn,
id=conn["VpcPeeringConnectionId"],
accepter_vpc=conn["AccepterVpcInfo"]["VpcId"],
accepter_cidr=conn["AccepterVpcInfo"].get(
"CidrBlock"
),
requester_vpc=conn["RequesterVpcInfo"]["VpcId"],
requester_cidr=conn["RequesterVpcInfo"].get(
"CidrBlock"
),
region=regional_client.region,
tags=conn.get("Tags"),
)
)
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -103,23 +116,29 @@ class VPC(AWSService):
},
]
)["RouteTables"]:
destination_cidrs = []
for route in route_table["Routes"]:
if (
route["Origin"] != "CreateRouteTable"
): # avoid default route table
try:
destination_cidrs = []
for route in route_table["Routes"]:
if (
"DestinationCidrBlock" in route
and "VpcPeeringConnectionId" in route
):
destination_cidrs.append(route["DestinationCidrBlock"])
conn.route_tables.append(
Route(
id=route_table["RouteTableId"],
destination_cidrs=destination_cidrs,
route["Origin"] != "CreateRouteTable"
): # avoid default route table
if (
"DestinationCidrBlock" in route
and "VpcPeeringConnectionId" in route
):
destination_cidrs.append(
route["DestinationCidrBlock"]
)
conn.route_tables.append(
Route(
id=route_table["RouteTableId"],
destination_cidrs=destination_cidrs,
)
)
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
)
except Exception as error:
logger.error(
f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"
@@ -129,20 +148,51 @@ class VPC(AWSService):
logger.info("VPC - Describing flow logs...")
try:
for vpc in self.vpcs.values():
regional_client = self.regional_clients[vpc.region]
flow_logs = regional_client.describe_flow_logs(
Filters=[
{
"Name": "resource-id",
"Values": [
vpc.id,
],
},
]
)["FlowLogs"]
if flow_logs:
vpc.flow_log = True
try:
regional_client = self.regional_clients[vpc.region]
flow_logs = regional_client.describe_flow_logs(
Filters=[
{
"Name": "resource-id",
"Values": [
vpc.id,
],
},
]
)["FlowLogs"]
if flow_logs:
vpc.flow_log = True
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"
)
def __describe_network_interfaces__(self):
logger.info("VPC - Describing flow logs...")
try:
for vpc in self.vpcs.values():
try:
regional_client = self.regional_clients[vpc.region]
enis = regional_client.describe_network_interfaces(
Filters=[
{
"Name": "vpc-id",
"Values": [
vpc.id,
],
},
]
)["NetworkInterfaces"]
if enis:
vpc.in_use = True
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"
@@ -156,25 +206,30 @@ class VPC(AWSService):
)
for page in describe_vpc_endpoints_paginator.paginate():
for endpoint in page["VpcEndpoints"]:
arn = f"arn:{self.audited_partition}:ec2:{regional_client.region}:{self.audited_account}:vpc-endpoint/{endpoint['VpcEndpointId']}"
if not self.audit_resources or (
is_resource_filtered(arn, self.audit_resources)
):
endpoint_policy = None
if endpoint.get("PolicyDocument"):
endpoint_policy = json.loads(endpoint["PolicyDocument"])
self.vpc_endpoints.append(
VpcEndpoint(
arn=arn,
id=endpoint["VpcEndpointId"],
vpc_id=endpoint["VpcId"],
service_name=endpoint["ServiceName"],
state=endpoint["State"],
policy_document=endpoint_policy,
owner_id=endpoint["OwnerId"],
region=regional_client.region,
tags=endpoint.get("Tags"),
try:
arn = f"arn:{self.audited_partition}:ec2:{regional_client.region}:{self.audited_account}:vpc-endpoint/{endpoint['VpcEndpointId']}"
if not self.audit_resources or (
is_resource_filtered(arn, self.audit_resources)
):
endpoint_policy = None
if endpoint.get("PolicyDocument"):
endpoint_policy = json.loads(endpoint["PolicyDocument"])
self.vpc_endpoints.append(
VpcEndpoint(
arn=arn,
id=endpoint["VpcEndpointId"],
vpc_id=endpoint["VpcId"],
service_name=endpoint["ServiceName"],
state=endpoint["State"],
policy_document=endpoint_policy,
owner_id=endpoint["OwnerId"],
region=regional_client.region,
tags=endpoint.get("Tags"),
)
)
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
@@ -189,21 +244,26 @@ class VPC(AWSService):
)
for page in describe_vpc_endpoint_services_paginator.paginate():
for endpoint in page["ServiceDetails"]:
if endpoint["Owner"] != "amazon":
arn = f"arn:{self.audited_partition}:ec2:{regional_client.region}:{self.audited_account}:vpc-endpoint-service/{endpoint['ServiceId']}"
if not self.audit_resources or (
is_resource_filtered(arn, self.audit_resources)
):
self.vpc_endpoint_services.append(
VpcEndpointService(
arn=arn,
id=endpoint["ServiceId"],
service=endpoint["ServiceName"],
owner_id=endpoint["Owner"],
region=regional_client.region,
tags=endpoint.get("Tags"),
try:
if endpoint["Owner"] != "amazon":
arn = f"arn:{self.audited_partition}:ec2:{regional_client.region}:{self.audited_account}:vpc-endpoint-service/{endpoint['ServiceId']}"
if not self.audit_resources or (
is_resource_filtered(arn, self.audit_resources)
):
self.vpc_endpoint_services.append(
VpcEndpointService(
arn=arn,
id=endpoint["ServiceId"],
service=endpoint["ServiceName"],
owner_id=endpoint["Owner"],
region=regional_client.region,
tags=endpoint.get("Tags"),
)
)
)
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -337,6 +397,7 @@ class VPCs(BaseModel):
id: str
name: str
default: bool
in_use: bool = False
cidr_block: str
flow_log: bool = False
region: str
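
Beyond the new in_use flag, this hunk also wraps each VPC, peering connection, route table, flow-log lookup, and endpoint in its own try/except, so a single malformed resource is logged and skipped instead of aborting the whole regional scan. A minimal sketch of that per-item hardening pattern, using only the standard library:

import logging

logger = logging.getLogger(__name__)

def describe_items(items):
    # Log and continue on a bad item, mirroring the error handling adopted above.
    results = []
    for item in items:
        try:
            results.append(item["id"].upper())
        except Exception as error:
            logger.error(f"{error.__class__.__name__}: {error}")
    return results

print(describe_items([{"id": "vpc-1"}, {"name": "broken"}, {"id": "vpc-2"}]))
# ['VPC-1', 'VPC-2'] plus one logged error for the malformed entry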

View File

@@ -1,7 +1,7 @@
{
"Provider": "aws",
"CheckID": "vpc_subnet_different_az",
"CheckTitle": "Ensure all vpc has subnets in more than one availability zone",
"CheckTitle": "Ensure all VPC has subnets in more than one availability zone",
"CheckType": [
"Infrastructure Security"
],
@@ -10,7 +10,7 @@
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"Severity": "medium",
"ResourceType": "AwsEc2Vpc",
"Description": "Ensure all vpc has subnets in more than one availability zone",
"Description": "Ensure all VPC has subnets in more than one availability zone",
"Risk": "",
"RelatedUrl": "https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html",
"Remediation": {
@@ -21,7 +21,7 @@
"Terraform": ""
},
"Recommendation": {
"Text": "Ensure all vpc has subnets in more than one availability zone",
"Text": "Ensure all VPC has subnets in more than one availability zone",
"Url": "https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html"
}
},

View File

@@ -1,7 +1,7 @@
{
"Provider": "aws",
"CheckID": "vpc_subnet_separate_private_public",
"CheckTitle": "Ensure all vpc has public and private subnets defined",
"CheckTitle": "Ensure all VPC has public and private subnets defined",
"CheckType": [
"Infrastructure Security"
],
@@ -10,7 +10,7 @@
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"Severity": "medium",
"ResourceType": "AwsEc2Vpc",
"Description": "Ensure all vpc has public and private subnets defined",
"Description": "Ensure all VPC has public and private subnets defined",
"Risk": "",
"RelatedUrl": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html",
"Remediation": {
@@ -21,7 +21,7 @@
"Terraform": ""
},
"Recommendation": {
"Text": "Ensure all vpc has public and private subnets defined",
"Text": "Ensure all VPC has public and private subnets defined",
"Url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html"
}
},

View File

@@ -109,6 +109,11 @@ Azure Identity Type: {Fore.YELLOW}[{audit_info.identity.identity_type}]{Style.RE
new_boto3_config = current_audit_info.session_config.merge(config)
current_audit_info.session_config = new_boto3_config
# Set ignore unused services argument
current_audit_info.ignore_unused_services = arguments.get(
"ignore_unused_services"
)
# Setting session
current_audit_info.profile = input_profile
current_audit_info.audited_regions = input_regions
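
Assuming the new CLI switch is declared as a boolean store_true option (the usual argparse pattern for flags of this kind), arguments.get("ignore_unused_services") resolves to False whenever the flag is omitted, so existing behaviour is unchanged by default. A minimal standalone sketch of that behaviour (not the real Prowler parser, which carries many more options):

import argparse

parser = argparse.ArgumentParser(prog="prowler-aws-sketch")
parser.add_argument("--ignore-unused-services", action="store_true")

print(vars(parser.parse_args([])))                            # {'ignore_unused_services': False}
print(vars(parser.parse_args(["--ignore-unused-services"])))  # {'ignore_unused_services': True}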