Mirror of https://github.com/ghndrx/prowler.git (synced 2026-02-10 14:55:00 +00:00)
chore(aws): 2nd round - Improve tests and include dot in status extended (#2714)
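Most hunks below apply the same convention: the human-readable report.status_extended message now ends with a period. A minimal before/after sketch of the pattern, excerpted from one of the checks touched in this commit:

    # before
    report.status_extended = f"EFS {fs.id} has encryption at rest enabled"
    # after
    report.status_extended = f"EFS {fs.id} has encryption at rest enabled."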
@@ -55,7 +55,7 @@ class awslambda_function_no_secrets_in_code(Check):
if secrets_findings:
final_output_string = "; ".join(secrets_findings)
report.status = "FAIL"
# report.status_extended = f"Potential {'secrets' if len(secrets_findings)>1 else 'secret'} found in Lambda function {function.name} code. {final_output_string}"
# report.status_extended = f"Potential {'secrets' if len(secrets_findings)>1 else 'secret'} found in Lambda function {function.name} code. {final_output_string}."
if len(secrets_findings) > 1:
report.status_extended = f"Potential secrets found in Lambda function {function.name} code -> {final_output_string}."
else:

@@ -20,10 +20,10 @@ class cloudformation_stacks_termination_protection_enabled(Check):
if stack.enable_termination_protection:
report.status = "PASS"
report.status_extended = f"CloudFormation {stack.name} has termination protection enabled"
report.status_extended = f"CloudFormation {stack.name} has termination protection enabled."
else:
report.status = "FAIL"
report.status_extended = f"CloudFormation {stack.name} has termination protection disabled"
report.status_extended = f"CloudFormation {stack.name} has termination protection disabled."
findings.append(report)
return findings

@@ -28,10 +28,10 @@ class codeartifact_packages_external_public_publishing_disabled(Check):
== RestrictionValues.ALLOW
):
report.status = "FAIL"
report.status_extended = f"Internal package {package.name} is vulnerable to dependency confusion in repository {repository.arn}"
report.status_extended = f"Internal package {package.name} is vulnerable to dependency confusion in repository {repository.arn}."
else:
report.status = "PASS"
report.status_extended = f"Internal package {package.name} is not vulnerable to dependency confusion in repository {repository.arn}"
report.status_extended = f"Internal package {package.name} is not vulnerable to dependency confusion in repository {repository.arn}."
findings.append(report)

@@ -13,17 +13,15 @@ class codebuild_project_older_90_days(Check):
report.resource_id = project.name
report.resource_arn = project.arn
report.status = "PASS"
report.status_extended = (
f"CodeBuild project {project.name} has been invoked in the last 90 days"
)
report.status_extended = f"CodeBuild project {project.name} has been invoked in the last 90 days."
if project.last_invoked_time:
if (datetime.now(timezone.utc) - project.last_invoked_time).days > 90:
report.status = "FAIL"
report.status_extended = f"CodeBuild project {project.name} has not been invoked in the last 90 days"
report.status_extended = f"CodeBuild project {project.name} has not been invoked in the last 90 days."
else:
report.status = "FAIL"
report.status_extended = (
f"CodeBuild project {project.name} has never been built"
f"CodeBuild project {project.name} has never been built."
)
findings.append(report)

@@ -13,13 +13,13 @@ class codebuild_project_user_controlled_buildspec(Check):
report.resource_id = project.name
report.resource_arn = project.arn
report.status = "PASS"
report.status_extended = f"CodeBuild project {project.name} does not use an user controlled buildspec"
report.status_extended = f"CodeBuild project {project.name} does not use an user controlled buildspec."
if project.buildspec:
if search(r".*\.yaml$", project.buildspec) or search(
r".*\.yml$", project.buildspec
):
report.status = "FAIL"
report.status_extended = f"CodeBuild project {project.name} uses an user controlled buildspec"
report.status_extended = f"CodeBuild project {project.name} uses an user controlled buildspec."
findings.append(report)
@@ -11,13 +11,14 @@ class directoryservice_directory_log_forwarding_enabled(Check):
report = Check_Report_AWS(self.metadata())
report.region = directory.region
report.resource_id = directory.id
report.resource_arn = directory.arn
report.resource_tags = directory.tags
if directory.log_subscriptions:
report.status = "PASS"
report.status_extended = f"Directory Service {directory.id} have log forwarding to CloudWatch enabled"
report.status_extended = f"Directory Service {directory.id} have log forwarding to CloudWatch enabled."
else:
report.status = "FAIL"
report.status_extended = f"Directory Service {directory.id} have log forwarding to CloudWatch disabled"
report.status_extended = f"Directory Service {directory.id} have log forwarding to CloudWatch disabled."
findings.append(report)

@@ -11,16 +11,17 @@ class directoryservice_directory_monitor_notifications(Check):
report = Check_Report_AWS(self.metadata())
report.region = directory.region
report.resource_id = directory.id
report.resource_arn = directory.arn
report.resource_tags = directory.tags
if directory.event_topics:
report.status = "PASS"
report.status_extended = (
f"Directory Service {directory.id} have SNS messaging enabled"
f"Directory Service {directory.id} have SNS messaging enabled."
)
else:
report.status = "FAIL"
report.status_extended = (
f"Directory Service {directory.id} have SNS messaging disabled"
f"Directory Service {directory.id} have SNS messaging disabled."
)
findings.append(report)

@@ -14,11 +14,12 @@ class directoryservice_directory_snapshots_limit(Check):
report = Check_Report_AWS(self.metadata())
report.region = directory.region
report.resource_id = directory.id
report.resource_arn = directory.arn
report.resource_tags = directory.tags
if directory.snapshots_limits:
if directory.snapshots_limits.manual_snapshots_limit_reached:
report.status = "FAIL"
report.status_extended = f"Directory Service {directory.id} reached {directory.snapshots_limits.manual_snapshots_limit} Snapshots limit"
report.status_extended = f"Directory Service {directory.id} reached {directory.snapshots_limits.manual_snapshots_limit} Snapshots limit."
else:
limit_remaining = (
directory.snapshots_limits.manual_snapshots_limit

@@ -26,10 +27,10 @@ class directoryservice_directory_snapshots_limit(Check):
)
if limit_remaining <= SNAPSHOT_LIMIT_THRESHOLD:
report.status = "FAIL"
report.status_extended = f"Directory Service {directory.id} is about to reach {directory.snapshots_limits.manual_snapshots_limit} Snapshots which is the limit"
report.status_extended = f"Directory Service {directory.id} is about to reach {directory.snapshots_limits.manual_snapshots_limit} Snapshots which is the limit."
else:
report.status = "PASS"
report.status_extended = f"Directory Service {directory.id} is using {directory.snapshots_limits.manual_snapshots_current_count} out of {directory.snapshots_limits.manual_snapshots_limit} from the Snapshots Limit"
report.status_extended = f"Directory Service {directory.id} is using {directory.snapshots_limits.manual_snapshots_current_count} out of {directory.snapshots_limits.manual_snapshots_limit} from the Snapshots Limit."
findings.append(report)
return findings

@@ -17,6 +17,7 @@ class directoryservice_ldap_certificate_expiration(Check):
report = Check_Report_AWS(self.metadata())
report.region = directory.region
report.resource_id = certificate.id
report.resource_arn = directory.arn
report.resource_tags = directory.tags
remaining_days_to_expire = (

@@ -30,10 +31,10 @@ class directoryservice_ldap_certificate_expiration(Check):
if remaining_days_to_expire <= DAYS_TO_EXPIRE_THRESHOLD:
report.status = "FAIL"
report.status_extended = f"LDAP Certificate {certificate.id} configured at {directory.id} is about to expire in {remaining_days_to_expire} days"
report.status_extended = f"LDAP Certificate {certificate.id} configured at {directory.id} is about to expire in {remaining_days_to_expire} days."
else:
report.status = "PASS"
report.status_extended = f"LDAP Certificate {certificate.id} configured at {directory.id} expires in {remaining_days_to_expire} days"
report.status_extended = f"LDAP Certificate {certificate.id} configured at {directory.id} expires in {remaining_days_to_expire} days."
findings.append(report)

@@ -15,16 +15,17 @@ class directoryservice_radius_server_security_protocol(Check):
report = Check_Report_AWS(self.metadata())
report.region = directory.region
report.resource_id = directory.id
report.resource_arn = directory.arn
report.resource_tags = directory.tags
if (
directory.radius_settings.authentication_protocol
== AuthenticationProtocol.MS_CHAPv2
):
report.status = "PASS"
report.status_extended = f"Radius server of Directory {directory.id} have recommended security protocol for the Radius server"
report.status_extended = f"Radius server of Directory {directory.id} have recommended security protocol for the Radius server."
else:
report.status = "FAIL"
report.status_extended = f"Radius server of Directory {directory.id} does not have recommended security protocol for the Radius server"
report.status_extended = f"Radius server of Directory {directory.id} does not have recommended security protocol for the Radius server."
findings.append(report)
@@ -37,8 +37,9 @@ class DirectoryService(AWSService):
)
):
directory_id = directory["DirectoryId"]
directory_arn = f"arn:{self.audited_partition}:ds:{regional_client.region}:{self.audited_account}:directory/{directory_id}"
directory_name = directory["Name"]
directory_type = DirectoryType(directory["Type"])
directory_type = directory["Type"]
# Radius Configuration
radius_authentication_protocol = (
AuthenticationProtocol(

@@ -56,6 +57,7 @@ class DirectoryService(AWSService):
self.directories[directory_id] = Directory(
name=directory_name,
id=directory_id,
arn=directory_arn,
type=directory_type,
region=regional_client.region,
radius_settings=RadiusSettings(

@@ -297,6 +299,7 @@ class DirectoryType(Enum):
class Directory(BaseModel):
name: str
id: str
arn: str
type: DirectoryType
log_subscriptions: list[LogSubscriptions] = []
event_topics: list[EventTopics] = []
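The service change above stops wrapping the raw API value in DirectoryType before building the model, while the Directory model keeps its type: DirectoryType annotation. This relies on pydantic coercing a matching string into the enum member during validation. A small sketch of that assumption (enum members trimmed, values illustrative):

    from enum import Enum
    from pydantic import BaseModel

    class DirectoryType(Enum):
        SimpleAD = "SimpleAD"
        MicrosoftAD = "MicrosoftAD"

    class Directory(BaseModel):
        id: str
        type: DirectoryType  # pydantic coerces "SimpleAD" into DirectoryType.SimpleAD

    directory = Directory(id="d-1234567890ab", type="SimpleAD")
    assert directory.type is DirectoryType.SimpleAD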
@@ -15,16 +15,17 @@ class directoryservice_supported_mfa_radius_enabled(Check):
report = Check_Report_AWS(self.metadata())
report.region = directory.region
report.resource_id = directory.id
report.resource_arn = directory.arn
report.resource_tags = directory.tags
if directory.radius_settings.status == RadiusStatus.Completed:
report.status = "PASS"
report.status_extended = (
f"Directory {directory.id} have Radius MFA enabled"
f"Directory {directory.id} have Radius MFA enabled."
)
else:
report.status = "FAIL"
report.status_extended = (
f"Directory {directory.id} does not have Radius MFA enabled"
f"Directory {directory.id} does not have Radius MFA enabled."
)
findings.append(report)

@@ -20,7 +20,7 @@ class ec2_elastic_ip_shodan(Check):
try:
shodan_info = api.host(eip.public_ip)
report.status = "FAIL"
report.status_extended = f"Elastic IP {eip.public_ip} listed in Shodan with open ports {str(shodan_info['ports'])} and ISP {shodan_info['isp']} in {shodan_info['country_name']}. More info https://www.shodan.io/host/{eip.public_ip}"
report.status_extended = f"Elastic IP {eip.public_ip} listed in Shodan with open ports {str(shodan_info['ports'])} and ISP {shodan_info['isp']} in {shodan_info['country_name']}. More info at https://www.shodan.io/host/{eip.public_ip}."
report.resource_id = eip.public_ip
findings.append(report)
except shodan.APIError as error:
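For context, the Shodan client used by this check returns a host dictionary whose keys include 'ports', 'isp', and 'country_name', and raises shodan.APIError when no data exists for the address. A minimal sketch of the lookup the check wraps (API key and IP are placeholders):

    import shodan

    api = shodan.Shodan("YOUR_SHODAN_API_KEY")
    try:
        shodan_info = api.host("203.0.113.10")
        print(shodan_info["ports"], shodan_info["isp"], shodan_info["country_name"])
    except shodan.APIError as error:
        # Typically "No information available for that IP." for unknown hosts
        print(error)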
@@ -18,7 +18,7 @@ class ec2_securitygroup_with_many_ingress_egress_rules(Check):
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "PASS"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has {len(security_group.ingress_rules)} inbound rules and {len(security_group.egress_rules)} outbound rules"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has {len(security_group.ingress_rules)} inbound rules and {len(security_group.egress_rules)} outbound rules."
if (
len(security_group.ingress_rules) > max_security_group_rules
or len(security_group.egress_rules) > max_security_group_rules

@@ -14,17 +14,17 @@ class ecr_registry_scan_images_on_push_enabled(Check):
# A registry cannot have tags
report.resource_tags = []
report.status = "FAIL"
report.status_extended = f"ECR registry {registry.id} has {registry.scan_type} scanning without scan on push enabled"
report.status_extended = f"ECR registry {registry.id} has {registry.scan_type} scanning without scan on push enabled."
if registry.rules:
report.status = "PASS"
report.status_extended = f"ECR registry {registry.id} has {registry.scan_type} scan with scan on push enabled"
report.status_extended = f"ECR registry {registry.id} has {registry.scan_type} scan with scan on push enabled."
filters = True
for rule in registry.rules:
if not rule.scan_filters or "'*'" in str(rule.scan_filters):
filters = False
if filters:
report.status = "FAIL"
report.status_extended = f"ECR registry {registry.id} has {registry.scan_type} scanning with scan on push but with repository filters"
report.status_extended = f"ECR registry {registry.id} has {registry.scan_type} scanning with scan on push but with repository filters."
findings.append(report)

@@ -14,7 +14,7 @@ class ecr_repositories_not_publicly_accessible(Check):
report.resource_tags = repository.tags
report.status = "PASS"
report.status_extended = (
f"Repository {repository.name} is not publicly accesible"
f"Repository {repository.name} is not publicly accesible."
)
if repository.policy:
for statement in repository.policy["Statement"]:

@@ -24,7 +24,7 @@ class ecr_repositories_not_publicly_accessible(Check):
and "*" in statement["Principal"]["AWS"]
):
report.status = "FAIL"
report.status_extended = f"Repository {repository.name} policy may allow anonymous users to perform actions (Principal: '*')"
report.status_extended = f"Repository {repository.name} policy may allow anonymous users to perform actions (Principal: '*')."
break
findings.append(report)

@@ -14,12 +14,12 @@ class ecr_repositories_scan_images_on_push_enabled(Check):
report.resource_tags = repository.tags
report.status = "PASS"
report.status_extended = (
f"ECR repository {repository.name} has scan on push enabled"
f"ECR repository {repository.name} has scan on push enabled."
)
if not repository.scan_on_push:
report.status = "FAIL"
report.status_extended = (
f"ECR repository {repository.name} has scan on push disabled"
f"ECR repository {repository.name} has scan on push disabled."
)
findings.append(report)

@@ -18,14 +18,14 @@ class ecr_repositories_scan_vulnerabilities_in_latest_image(Check):
report.resource_arn = repository.arn
report.resource_tags = repository.tags
report.status = "PASS"
report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned without findings"
report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned without findings."
if not image.scan_findings_status:
report.status = "FAIL"
report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} without a scan"
report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} without a scan."
elif image.scan_findings_status == "FAILED":
report.status = "FAIL"
report.status_extended = (
f"ECR repository {repository.name} with scan status FAILED"
f"ECR repository {repository.name} with scan status FAILED."
)
elif image.scan_findings_status != "FAILED":
if image.scan_findings_severity_count and (

@@ -34,7 +34,7 @@ class ecr_repositories_scan_vulnerabilities_in_latest_image(Check):
or image.scan_findings_severity_count.medium
):
report.status = "FAIL"
report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}, MEDIUM->{image.scan_findings_severity_count.medium} "
report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}, MEDIUM->{image.scan_findings_severity_count.medium}."
findings.append(report)
@@ -19,7 +19,7 @@ class ecs_task_definitions_no_environment_secrets(Check):
report.resource_arn = task_definition.arn
report.resource_tags = task_definition.tags
report.status = "PASS"
report.status_extended = f"No secrets found in variables of ECS task definition {task_definition.name} with revision {task_definition.revision}"
report.status_extended = f"No secrets found in variables of ECS task definition {task_definition.name} with revision {task_definition.revision}."
if task_definition.environment_variables:
dump_env_vars = {}
for env_var in task_definition.environment_variables:

@@ -44,7 +44,7 @@ class ecs_task_definitions_no_environment_secrets(Check):
]
)
report.status = "FAIL"
report.status_extended = f"Potential secret found in variables of ECS task definition {task_definition.name} with revision {task_definition.revision} -> {secrets_string}"
report.status_extended = f"Potential secret found in variables of ECS task definition {task_definition.name} with revision {task_definition.revision} -> {secrets_string}."
os.remove(temp_env_data_file.name)

@@ -13,11 +13,11 @@ class efs_encryption_at_rest_enabled(Check):
report.resource_tags = fs.tags
report.status = "FAIL"
report.status_extended = (
f"EFS {fs.id} does not have encryption at rest enabled"
f"EFS {fs.id} does not have encryption at rest enabled."
)
if fs.encrypted:
report.status = "PASS"
report.status_extended = f"EFS {fs.id} has encryption at rest enabled"
report.status_extended = f"EFS {fs.id} has encryption at rest enabled."
findings.append(report)

@@ -13,11 +13,11 @@ class efs_not_publicly_accessible(Check):
report.resource_tags = fs.tags
report.status = "PASS"
report.status_extended = (
f"EFS {fs.id} has a policy which does not allow access to everyone"
f"EFS {fs.id} has a policy which does not allow access to everyone."
)
if not fs.policy:
report.status = "FAIL"
report.status_extended = f"EFS {fs.id} doesn't have any policy which means it grants full access to any client"
report.status_extended = f"EFS {fs.id} doesn't have any policy which means it grants full access to any client."
else:
for statement in fs.policy["Statement"]:
if statement["Effect"] == "Allow":

@@ -34,7 +34,7 @@ class efs_not_publicly_accessible(Check):
)
):
report.status = "FAIL"
report.status_extended = f"EFS {fs.id} has a policy which allows access to everyone"
report.status_extended = f"EFS {fs.id} has a policy which allows access to everyone."
break
findings.append(report)
@@ -13,14 +13,14 @@ class eks_control_plane_endpoint_access_restricted(Check):
report.resource_tags = cluster.tags
report.status = "PASS"
report.status_extended = (
f"Cluster endpoint access is private for EKS cluster {cluster.name}"
f"Cluster endpoint access is private for EKS cluster {cluster.name}."
)
if cluster.endpoint_public_access and not cluster.endpoint_private_access:
if "0.0.0.0/0" in cluster.public_access_cidrs:
report.status = "FAIL"
report.status_extended = f"Cluster control plane access is not restricted for EKS cluster {cluster.name}"
report.status_extended = f"Cluster control plane access is not restricted for EKS cluster {cluster.name}."
else:
report.status_extended = f"Cluster control plane access is restricted for EKS cluster {cluster.name}"
report.status_extended = f"Cluster control plane access is restricted for EKS cluster {cluster.name}."
findings.append(report)
return findings

@@ -13,7 +13,7 @@ class eks_control_plane_logging_all_types_enabled(Check):
report.resource_tags = cluster.tags
report.status = "FAIL"
report.status_extended = (
f"Control plane logging is not enabled for EKS cluster {cluster.name}"
f"Control plane logging is not enabled for EKS cluster {cluster.name}."
)
if cluster.logging and cluster.logging.enabled:
if all(

@@ -27,9 +27,9 @@ class eks_control_plane_logging_all_types_enabled(Check):
]
):
report.status = "PASS"
report.status_extended = f"Control plane logging enabled and correctly configured for EKS cluster {cluster.name}"
report.status_extended = f"Control plane logging enabled and correctly configured for EKS cluster {cluster.name}."
else:
report.status_extended = f"Control plane logging enabled but not all log types collected for EKS cluster {cluster.name}"
report.status_extended = f"Control plane logging enabled but not all log types collected for EKS cluster {cluster.name}."
findings.append(report)
return findings

@@ -13,12 +13,12 @@ class eks_endpoints_not_publicly_accessible(Check):
report.resource_tags = cluster.tags
report.status = "PASS"
report.status_extended = (
f"Cluster endpoint access is private for EKS cluster {cluster.name}"
f"Cluster endpoint access is private for EKS cluster {cluster.name}."
)
if cluster.endpoint_public_access and not cluster.endpoint_private_access:
report.status = "FAIL"
report.status_extended = (
f"Cluster endpoint access is public for EKS cluster {cluster.name}"
f"Cluster endpoint access is public for EKS cluster {cluster.name}."
)
findings.append(report)

@@ -17,9 +17,9 @@ class elbv2_desync_mitigation_mode(Check):
if lb.desync_mitigation_mode == "monitor":
if lb.drop_invalid_header_fields == "false":
report.status = "FAIL"
report.status_extended = f"ELBv2 ALB {lb.name} does not have desync mitigation mode set as defensive or strictest and is not dropping invalid header fields"
report.status_extended = f"ELBv2 ALB {lb.name} does not have desync mitigation mode set as defensive or strictest and is not dropping invalid header fields."
elif lb.drop_invalid_header_fields == "true":
report.status_extended = f"ELBv2 ALB {lb.name} does not have desync mitigation mode set as defensive or strictest but is dropping invalid header fields"
report.status_extended = f"ELBv2 ALB {lb.name} does not have desync mitigation mode set as defensive or strictest but is dropping invalid header fields."
findings.append(report)
return findings
@@ -14,10 +14,10 @@ class emr_cluster_account_public_block_enabled(Check):
region
].block_public_security_group_rules:
report.status = "PASS"
report.status_extended = "EMR Account has Block Public Access enabled"
report.status_extended = "EMR Account has Block Public Access enabled."
else:
report.status = "FAIL"
report.status_extended = "EMR Account has Block Public Access disabled"
report.status_extended = "EMR Account has Block Public Access disabled."
findings.append(report)

@@ -22,7 +22,7 @@ class emr_cluster_publicly_accesible(Check):
report.resource_tags = cluster.tags
report.status = "PASS"
report.status_extended = (
f"EMR Cluster {cluster.id} is not publicly accessible"
f"EMR Cluster {cluster.id} is not publicly accessible."
)
# If EMR cluster is Public, it is required to check
# their Security Groups for the Master,

@@ -11,13 +11,13 @@ class fms_policy_compliant(Check):
report.resource_id = fms_client.audited_account
report.region = fms_client.region
report.status = "PASS"
report.status_extended = "FMS enabled with all compliant accounts"
report.status_extended = "FMS enabled with all compliant accounts."
non_compliant_policy = False
for policy in fms_client.fms_policies:
for policy_to_account in policy.compliance_status:
if policy_to_account.status == "NON_COMPLIANT":
report.status = "FAIL"
report.status_extended = f"FMS with non-compliant policy {policy.name} for account {policy_to_account.account_id}"
report.status_extended = f"FMS with non-compliant policy {policy.name} for account {policy_to_account.account_id}."
report.resource_id = policy.id
report.resource_arn = policy.arn
non_compliant_policy = True
@@ -12,9 +12,7 @@ class glacier_vaults_policy_public_access(Check):
report.resource_arn = vault.arn
report.resource_tags = vault.tags
report.status = "PASS"
report.status_extended = (
f"Vault {vault.name} has policy which does not allow access to everyone"
)
report.status_extended = f"Vault {vault.name} has policy which does not allow access to everyone."
public_access = False
if vault.access_policy:

@@ -35,11 +33,11 @@ class glacier_vaults_policy_public_access(Check):
public_access = True
break
else:
report.status_extended = f"Vault {vault.name} does not have a policy"
report.status_extended = f"Vault {vault.name} does not have a policy."
if public_access:
report.status = "FAIL"
report.status_extended = (
f"Vault {vault.name} has policy which allows access to everyone"
f"Vault {vault.name} has policy which allows access to everyone."
)
findings.append(report)

@@ -14,14 +14,14 @@ class guardduty_centrally_managed(Check):
report.resource_tags = detector.tags
report.status = "FAIL"
report.status_extended = (
f"GuardDuty detector {detector.id} is not centrally managed"
f"GuardDuty detector {detector.id} is not centrally managed."
)
if detector.administrator_account:
report.status = "PASS"
report.status_extended = f"GuardDuty detector {detector.id} is centrally managed by account {detector.administrator_account}"
report.status_extended = f"GuardDuty detector {detector.id} is centrally managed by account {detector.administrator_account}."
elif detector.member_accounts:
report.status = "PASS"
report.status_extended = f"GuardDuty detector {detector.id} is administrator account with {len(detector.member_accounts)} member accounts"
report.status_extended = f"GuardDuty detector {detector.id} is administrator account with {len(detector.member_accounts)} member accounts."
findings.append(report)
@@ -12,19 +12,19 @@ class guardduty_is_enabled(Check):
report.resource_arn = detector.arn
report.resource_tags = detector.tags
report.status = "PASS"
report.status_extended = f"GuardDuty detector {detector.id} enabled"
report.status_extended = f"GuardDuty detector {detector.id} enabled."
if not detector.id:
report.status = "FAIL"
report.status_extended = "GuardDuty is not enabled"
report.status_extended = "GuardDuty is not enabled."
elif detector.status is None:
report.status = "FAIL"
report.status_extended = (
f"GuardDuty detector {detector.id} not configured"
f"GuardDuty detector {detector.id} not configured."
)
elif not detector.status:
report.status = "FAIL"
report.status_extended = (
f"GuardDuty detector {detector.id} configured but suspended"
f"GuardDuty detector {detector.id} configured but suspended."
)
findings.append(report)

@@ -16,7 +16,7 @@ class guardduty_no_high_severity_findings(Check):
report.status_extended = f"GuardDuty detector {detector.id} does not have high severity findings."
if len(detector.findings) > 0:
report.status = "FAIL"
report.status_extended = f"GuardDuty detector {detector.id} has {str(len(detector.findings))} high severity findings"
report.status_extended = f"GuardDuty detector {detector.id} has {str(len(detector.findings))} high severity findings."
findings.append(report)

@@ -14,7 +14,7 @@ class iam_aws_attached_policy_no_administrative_privileges(Check):
report.resource_id = policy.name
report.resource_tags = policy.tags
report.status = "PASS"
report.status_extended = f"{policy.type} policy {policy.name} is attached but does not allow '*:*' administrative privileges"
report.status_extended = f"{policy.type} policy {policy.name} is attached but does not allow '*:*' administrative privileges."
if policy.document:
# Check the statements, if one includes *:* stop iterating over the rest
if not isinstance(policy.document["Statement"], list):

@@ -36,7 +36,7 @@ class iam_aws_attached_policy_no_administrative_privileges(Check):
)
):
report.status = "FAIL"
report.status_extended = f"{policy.type} policy {policy.name} is attached and allows '*:*' administrative privileges"
report.status_extended = f"{policy.type} policy {policy.name} is attached and allows '*:*' administrative privileges."
break
findings.append(report)
return findings
@@ -13,7 +13,9 @@ class iam_check_saml_providers_sts(Check):
report.resource_arn = provider["Arn"]
report.region = iam_client.region
report.status = "PASS"
report.status_extended = f"SAML Provider {provider_name} has been found"
report.status_extended = (
f"SAML Provider {provider_name} has been found."
)
findings.append(report)
return findings

@@ -14,7 +14,7 @@ class iam_customer_attached_policy_no_administrative_privileges(Check):
report.resource_id = policy.name
report.resource_tags = policy.tags
report.status = "PASS"
report.status_extended = f"{policy.type} policy {policy.name} is attached but does not allow '*:*' administrative privileges"
report.status_extended = f"{policy.type} policy {policy.name} is attached but does not allow '*:*' administrative privileges."
if policy.document:
# Check the statements, if one includes *:* stop iterating over the rest
if not isinstance(policy.document["Statement"], list):

@@ -36,7 +36,7 @@ class iam_customer_attached_policy_no_administrative_privileges(Check):
)
):
report.status = "FAIL"
report.status_extended = f"{policy.type} policy {policy.name} is attached and allows '*:*' administrative privileges"
report.status_extended = f"{policy.type} policy {policy.name} is attached and allows '*:*' administrative privileges."
break
findings.append(report)
return findings

@@ -14,7 +14,7 @@ class iam_customer_unattached_policy_no_administrative_privileges(Check):
report.resource_id = policy.name
report.resource_tags = policy.tags
report.status = "PASS"
report.status_extended = f"{policy.type} policy {policy.name} is unattached and does not allow '*:*' administrative privileges"
report.status_extended = f"{policy.type} policy {policy.name} is unattached and does not allow '*:*' administrative privileges."
if policy.document:
# Check the statements, if one includes *:* stop iterating over the rest
if not isinstance(policy.document["Statement"], list):

@@ -36,7 +36,7 @@ class iam_customer_unattached_policy_no_administrative_privileges(Check):
)
):
report.status = "FAIL"
report.status_extended = f"{policy.type} policy {policy.name} is unattached and allows '*:*' administrative privileges"
report.status_extended = f"{policy.type} policy {policy.name} is unattached and allows '*:*' administrative privileges."
break
findings.append(report)
return findings

@@ -14,7 +14,7 @@ class iam_no_custom_policy_permissive_role_assumption(Check):
report.resource_id = policy.name
report.resource_tags = policy.tags
report.status = "PASS"
report.status_extended = f"Custom Policy {policy.name} does not allow permissive STS Role assumption"
report.status_extended = f"Custom Policy {policy.name} does not allow permissive STS Role assumption."
if policy.document:
if not isinstance(policy.document["Statement"], list):
policy_statements = [policy.document["Statement"]]

@@ -35,7 +35,7 @@ class iam_no_custom_policy_permissive_role_assumption(Check):
or action == "*"
):
report.status = "FAIL"
report.status_extended = f"Custom Policy {policy.name} allows permissive STS Role assumption"
report.status_extended = f"Custom Policy {policy.name} allows permissive STS Role assumption."
break
else:
if (

@@ -44,7 +44,7 @@ class iam_no_custom_policy_permissive_role_assumption(Check):
or statement["Action"] == "*"
):
report.status = "FAIL"
report.status_extended = f"Custom Policy {policy.name} allows permissive STS Role assumption"
report.status_extended = f"Custom Policy {policy.name} allows permissive STS Role assumption."
break
findings.append(report)
@@ -22,6 +22,6 @@ class iam_password_policy_lowercase(Check):
report.status_extended = "IAM password policy does not require at least one lowercase letter."
else:
report.status = "FAIL"
report.status_extended = "Password policy cannot be found"
report.status_extended = "Password policy cannot be found."
findings.append(report)
return findings

@@ -25,6 +25,6 @@ class iam_password_policy_minimum_length_14(Check):
report.status_extended = "IAM password policy does not require minimum length of 14 characters."
else:
report.status = "FAIL"
report.status_extended = "Password policy cannot be found"
report.status_extended = "Password policy cannot be found."
findings.append(report)
return findings

@@ -24,6 +24,6 @@ class iam_password_policy_number(Check):
)
else:
report.status = "FAIL"
report.status_extended = "There is no password policy."
report.status_extended = "Password policy cannot be found."
findings.append(report)
return findings

@@ -27,6 +27,6 @@ class iam_password_policy_reuse_24(Check):
)
else:
report.status = "FAIL"
report.status_extended = "Password policy cannot be found"
report.status_extended = "Password policy cannot be found."
findings.append(report)
return findings

@@ -24,6 +24,6 @@ class iam_password_policy_symbol(Check):
)
else:
report.status = "FAIL"
report.status_extended = "There is no password policy."
report.status_extended = "Password policy cannot be found."
findings.append(report)
return findings

@@ -22,6 +22,6 @@ class iam_password_policy_uppercase(Check):
report.status_extended = "IAM password policy does not require at least one uppercase letter."
else:
report.status = "FAIL"
report.status_extended = "There is no password policy."
report.status_extended = "Password policy cannot be found."
findings.append(report)
return findings
@@ -100,7 +100,7 @@ class iam_policy_allows_privilege_escalation(Check):
report.region = iam_client.region
report.resource_tags = policy.tags
report.status = "PASS"
report.status_extended = f"Custom Policy {report.resource_arn} does not allow privilege escalation"
report.status_extended = f"Custom Policy {report.resource_arn} does not allow privilege escalation."
# List of policy actions
allowed_actions = set()

@@ -186,6 +186,9 @@ class iam_policy_allows_privilege_escalation(Check):
+ " "
)
report.status_extended = f"Custom Policy {report.resource_arn} allows privilege escalation using the following actions: {policies_affected}".rstrip()
report.status_extended = (
f"Custom Policy {report.resource_arn} allows privilege escalation using the following actions: {policies_affected}".rstrip()
+ "."
)
findings.append(report)
return findings
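The message above is assembled by appending each affected action followed by a space (the + " " a few lines earlier), so the string carries a trailing space; the new form strips it before adding the closing period. A tiny illustration of that assumption (action names are only examples):

    policies_affected = "iam:CreatePolicyVersion iam:SetDefaultPolicyVersion "  # built with a trailing space
    message = f"... allows privilege escalation using the following actions: {policies_affected}".rstrip() + "."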
@@ -16,7 +16,7 @@ class iam_policy_no_full_access_to_cloudtrail(Check):
report.resource_id = policy.name
report.resource_tags = policy.tags
report.status = "PASS"
report.status_extended = f"Custom Policy {policy.name} does not allow '{critical_service}:*' privileges"
report.status_extended = f"Custom Policy {policy.name} does not allow '{critical_service}:*' privileges."
if policy.document:
if not isinstance(policy.document["Statement"], list):
policy_statements = [policy.document["Statement"]]

@@ -34,7 +34,7 @@ class iam_policy_no_full_access_to_cloudtrail(Check):
)
):
report.status = "FAIL"
report.status_extended = f"Custom Policy {policy.name} allows '{critical_service}:*' privileges"
report.status_extended = f"Custom Policy {policy.name} allows '{critical_service}:*' privileges."
break
findings.append(report)
return findings

@@ -16,7 +16,7 @@ class iam_policy_no_full_access_to_kms(Check):
report.resource_id = policy.name
report.resource_tags = policy.tags
report.status = "PASS"
report.status_extended = f"Custom Policy {policy.name} does not allow '{critical_service}:*' privileges"
report.status_extended = f"Custom Policy {policy.name} does not allow '{critical_service}:*' privileges."
if policy.document:
if not isinstance(policy.document["Statement"], list):
policy_statements = [policy.document["Statement"]]

@@ -34,7 +34,7 @@ class iam_policy_no_full_access_to_kms(Check):
)
):
report.status = "FAIL"
report.status_extended = f"Custom Policy {policy.name} allows '{critical_service}:*' privileges"
report.status_extended = f"Custom Policy {policy.name} allows '{critical_service}:*' privileges."
break
findings.append(report)

@@ -17,7 +17,7 @@ class iam_role_cross_service_confused_deputy_prevention(Check):
report.resource_id = role.name
report.resource_tags = role.tags
report.status = "FAIL"
report.status_extended = f"IAM Service Role {role.name} does not prevent against a cross-service confused deputy attack"
report.status_extended = f"IAM Service Role {role.name} does not prevent against a cross-service confused deputy attack."
for statement in role.assume_role_policy["Statement"]:
if (
statement["Effect"] == "Allow"

@@ -35,7 +35,7 @@ class iam_role_cross_service_confused_deputy_prevention(Check):
)
):
report.status = "PASS"
report.status_extended = f"IAM Service Role {role.name} prevents against a cross-service confused deputy attack"
report.status_extended = f"IAM Service Role {role.name} prevents against a cross-service confused deputy attack."
break
findings.append(report)

@@ -11,9 +11,9 @@ class iam_securityaudit_role_created(Check):
report.resource_arn = "arn:aws:iam::aws:policy/SecurityAudit"
if iam_client.entities_role_attached_to_securityaudit_policy:
report.status = "PASS"
report.status_extended = f"SecurityAudit policy attached to role {iam_client.entities_role_attached_to_securityaudit_policy[0]['RoleName']}"
report.status_extended = f"SecurityAudit policy attached to role {iam_client.entities_role_attached_to_securityaudit_policy[0]['RoleName']}."
else:
report.status = "FAIL"
report.status_extended = "SecurityAudit policy is not attached to any role"
report.status_extended = "SecurityAudit policy is not attached to any role."
findings.append(report)
return findings

@@ -13,9 +13,9 @@ class iam_support_role_created(Check):
)
if iam_client.entities_role_attached_to_support_policy:
report.status = "PASS"
report.status_extended = f"Support policy attached to role {iam_client.entities_role_attached_to_support_policy[0]['RoleName']}"
report.status_extended = f"Support policy attached to role {iam_client.entities_role_attached_to_support_policy[0]['RoleName']}."
else:
report.status = "FAIL"
report.status_extended = "Support policy is not attached to any role"
report.status_extended = "Support policy is not attached to any role."
findings.append(report)
return findings
@@ -26,7 +26,7 @@ class iam_user_no_setup_initial_access_key(Check):
report.resource_arn = user_record["arn"]
report.status = "FAIL"
report.status_extended = (
f"User {user_record['user']} has never used access key 1"
f"User {user_record['user']} has never used access key 1."
)
findings.append(report)
if (

@@ -40,7 +40,7 @@ class iam_user_no_setup_initial_access_key(Check):
report.resource_arn = user_record["arn"]
report.status = "FAIL"
report.status_extended = (
f"User {user_record['user']} has never used access key 2"
f"User {user_record['user']} has never used access key 2."
)
findings.append(report)
else:

@@ -49,7 +49,7 @@ class iam_user_no_setup_initial_access_key(Check):
report.resource_id = user_record["user"]
report.resource_arn = user_record["arn"]
report.status = "PASS"
report.status_extended = f"User {user_record['user']} does not have access keys or uses the access keys configured"
report.status_extended = f"User {user_record['user']} does not have access keys or uses the access keys configured."
findings.append(report)
return findings

@@ -17,13 +17,13 @@ class inspector2_findings_exist(Check):
if inspector.status == "ENABLED":
active_findings = 0
report.status = "PASS"
report.status_extended = "Inspector2 is enabled with no findings"
report.status_extended = "Inspector2 is enabled with no findings."
for finding in inspector.findings:
if finding.status == "ACTIVE":
active_findings += 1
if len(inspector.findings) > 0:
report.status_extended = (
"Inspector2 is enabled with no active findings"
"Inspector2 is enabled with no active findings."
)
if active_findings > 0:
report.status = "FAIL"

@@ -26,7 +26,7 @@ class kms_key_not_publicly_accessible(Check):
):
report.status = "FAIL"
report.status_extended = (
f"KMS key {key.id} may be publicly accessible!"
f"KMS key {key.id} may be publicly accessible."
)
elif (
"Principal" in statement and "AWS" in statement["Principal"]

@@ -42,7 +42,7 @@ class kms_key_not_publicly_accessible(Check):
):
report.status = "FAIL"
report.status_extended = (
f"KMS key {key.id} may be publicly accessible!"
f"KMS key {key.id} may be publicly accessible."
)
findings.append(report)
return findings
@@ -15,13 +15,13 @@ class opensearch_service_domains_audit_logging_enabled(Check):
report.resource_tags = domain.tags
report.status = "FAIL"
report.status_extended = (
f"Opensearch domain {domain.name} AUDIT_LOGS disabled"
f"Opensearch domain {domain.name} AUDIT_LOGS disabled."
)
for logging_item in domain.logging:
if logging_item.name == "AUDIT_LOGS" and logging_item.enabled:
report.status = "PASS"
report.status_extended = (
f"Opensearch domain {domain.name} AUDIT_LOGS enabled"
f"Opensearch domain {domain.name} AUDIT_LOGS enabled."
)
findings.append(report)

@@ -14,7 +14,7 @@ class opensearch_service_domains_cloudwatch_logging_enabled(Check):
report.resource_arn = domain.arn
report.resource_tags = domain.tags
report.status = "FAIL"
report.status_extended = f"Opensearch domain {domain.name} SEARCH_SLOW_LOGS and INDEX_SLOW_LOGS disabled"
report.status_extended = f"Opensearch domain {domain.name} SEARCH_SLOW_LOGS and INDEX_SLOW_LOGS disabled."
has_SEARCH_SLOW_LOGS = False
has_INDEX_SLOW_LOGS = False
for logging_item in domain.logging:

@@ -25,13 +25,13 @@ class opensearch_service_domains_cloudwatch_logging_enabled(Check):
if has_SEARCH_SLOW_LOGS and has_INDEX_SLOW_LOGS:
report.status = "PASS"
report.status_extended = f"Opensearch domain {domain.name} SEARCH_SLOW_LOGS and INDEX_SLOW_LOGS enabled"
report.status_extended = f"Opensearch domain {domain.name} SEARCH_SLOW_LOGS and INDEX_SLOW_LOGS enabled."
elif not has_SEARCH_SLOW_LOGS and has_INDEX_SLOW_LOGS:
report.status = "FAIL"
report.status_extended = f"Opensearch domain {domain.name} INDEX_SLOW_LOGS enabled but SEARCH_SLOW_LOGS disabled"
report.status_extended = f"Opensearch domain {domain.name} INDEX_SLOW_LOGS enabled but SEARCH_SLOW_LOGS disabled."
elif not has_INDEX_SLOW_LOGS and has_SEARCH_SLOW_LOGS:
report.status = "FAIL"
report.status_extended = f"Opensearch domain {domain.name} SEARCH_SLOW_LOGS enabled but INDEX_SLOW_LOGS disabled"
report.status_extended = f"Opensearch domain {domain.name} SEARCH_SLOW_LOGS enabled but INDEX_SLOW_LOGS disabled."
findings.append(report)

@@ -15,11 +15,11 @@ class opensearch_service_domains_encryption_at_rest_enabled(Check):
report.resource_tags = domain.tags
report.status = "PASS"
report.status_extended = (
f"Opensearch domain {domain.name} has encryption at-rest enabled"
f"Opensearch domain {domain.name} has encryption at-rest enabled."
)
if not domain.encryption_at_rest:
report.status = "FAIL"
report.status_extended = f"Opensearch domain {domain.name} does not have encryption at-rest enabled"
report.status_extended = f"Opensearch domain {domain.name} does not have encryption at-rest enabled."
findings.append(report)

@@ -15,11 +15,11 @@ class opensearch_service_domains_https_communications_enforced(Check):
report.resource_tags = domain.tags
report.status = "PASS"
report.status_extended = (
f"Opensearch domain {domain.name} has enforce HTTPS enabled"
f"Opensearch domain {domain.name} has enforce HTTPS enabled."
)
if not domain.enforce_https:
report.status = "FAIL"
report.status_extended = f"Opensearch domain {domain.name} does not have enforce HTTPS enabled"
report.status_extended = f"Opensearch domain {domain.name} does not have enforce HTTPS enabled."
findings.append(report)

@@ -14,10 +14,10 @@ class opensearch_service_domains_internal_user_database_enabled(Check):
report.resource_arn = domain.arn
report.resource_tags = domain.tags
report.status = "PASS"
report.status_extended = f"Opensearch domain {domain.name} does not have internal user database enabled"
report.status_extended = f"Opensearch domain {domain.name} does not have internal user database enabled."
if domain.internal_user_database:
report.status = "FAIL"
report.status_extended = f"Opensearch domain {domain.name} has internal user database enabled"
report.status_extended = f"Opensearch domain {domain.name} has internal user database enabled."
findings.append(report)

@@ -15,11 +15,11 @@ class opensearch_service_domains_node_to_node_encryption_enabled(Check):
report.resource_tags = domain.tags
report.status = "PASS"
report.status_extended = (
f"Opensearch domain {domain.name} has node-to-node encryption enabled"
f"Opensearch domain {domain.name} has node-to-node encryption enabled."
)
if not domain.node_to_node_encryption:
report.status = "FAIL"
report.status_extended = f"Opensearch domain {domain.name} does not have node-to-node encryption enabled"
report.status_extended = f"Opensearch domain {domain.name} does not have node-to-node encryption enabled."
findings.append(report)

@@ -15,7 +15,7 @@ class opensearch_service_domains_not_publicly_accessible(Check):
report.resource_tags = domain.tags
report.status = "PASS"
report.status_extended = (
f"Opensearch domain {domain.name} does not allow anonymous access"
f"Opensearch domain {domain.name} does not allow anonymous access."
)
if domain.access_policy:
for statement in domain.access_policy["Statement"]:

@@ -30,7 +30,7 @@ class opensearch_service_domains_not_publicly_accessible(Check):
):
if "Condition" not in statement:
report.status = "FAIL"
report.status_extended = f"Opensearch domain {domain.name} policy allows access (Principal: '*')"
report.status_extended = f"Opensearch domain {domain.name} policy allows access (Principal: '*')."
break
else:
if (

@@ -43,11 +43,11 @@ class opensearch_service_domains_not_publicly_accessible(Check):
]:
if ip == "*":
report.status = "FAIL"
report.status_extended = f"Opensearch domain {domain.name} policy allows access (Principal: '*') and network *"
report.status_extended = f"Opensearch domain {domain.name} policy allows access (Principal: '*') and network *."
break
elif ip == "0.0.0.0/0":
report.status = "FAIL"
report.status_extended = f"Opensearch domain {domain.name} policy allows access (Principal: '*') and network 0.0.0.0/0"
report.status_extended = f"Opensearch domain {domain.name} policy allows access (Principal: '*') and network 0.0.0.0/0."
break
findings.append(report)
@@ -14,10 +14,10 @@ class opensearch_service_domains_updated_to_the_latest_service_software_version(
report.resource_arn = domain.arn
report.resource_tags = domain.tags
report.status = "PASS"
report.status_extended = f"Opensearch domain {domain.name} with version {domain.version} does not have internal updates available"
report.status_extended = f"Opensearch domain {domain.name} with version {domain.version} does not have internal updates available."
if domain.update_available:
report.status = "FAIL"
report.status_extended = f"Opensearch domain {domain.name} with version {domain.version} has internal updates available"
report.status_extended = f"Opensearch domain {domain.name} with version {domain.version} has internal updates available."
findings.append(report)

@@ -14,10 +14,10 @@ class opensearch_service_domains_use_cognito_authentication_for_kibana(Check):
report.resource_arn = domain.arn
report.resource_tags = domain.tags
report.status = "PASS"
report.status_extended = f"Opensearch domain {domain.name} has Amazon Cognito authentication for Kibana enabled"
report.status_extended = f"Opensearch domain {domain.name} has Amazon Cognito authentication for Kibana enabled."
if not domain.cognito_options:
report.status = "FAIL"
report.status_extended = f"Opensearch domain {domain.name} does not have Amazon Cognito authentication for Kibana enabled"
report.status_extended = f"Opensearch domain {domain.name} does not have Amazon Cognito authentication for Kibana enabled."
findings.append(report)
@@ -12,12 +12,12 @@ class organizations_account_part_of_organizations(Check):
if org.status == "ACTIVE":
report.status = "PASS"
report.status_extended = (
f"Account is part of AWS Organization: {org.id}"
f"Account is part of AWS Organization: {org.id}."
)
else:
report.status = "FAIL"
report.status_extended = (
"AWS Organizations is not in-use for this AWS Account"
"AWS Organizations is not in-use for this AWS Account."
)
report.region = organizations_client.region
report.resource_id = org.id

@@ -30,13 +30,13 @@ class organizations_delegated_administrators(Check):
not in organizations_trusted_delegated_administrators
):
report.status = "FAIL"
report.status_extended = f"Untrusted Delegated Administrators: {delegated_administrator.id}"
report.status_extended = f"Untrusted Delegated Administrators: {delegated_administrator.id}."
else:
report.status = "PASS"
report.status_extended = f"Trusted Delegated Administrator: {delegated_administrator.id}"
report.status_extended = f"Trusted Delegated Administrator: {delegated_administrator.id}."
else:
report.status = "PASS"
report.status_extended = f"No Delegated Administrators: {org.id}"
report.status_extended = f"No Delegated Administrators: {org.id}."
findings.append(report)
@@ -23,7 +23,7 @@ class organizations_scp_check_deny_regions(Check):
if not org.policies:
report.status = "FAIL"
report.status_extended = (
f"No SCP policies exist at the organization {org.id} level"
f"No SCP policies exist at the organization {org.id} level."
)
else:
# We use this flag if we find a statement that is restricting regions but not all the configured ones:

@@ -56,14 +56,14 @@ class organizations_scp_check_deny_regions(Check):
):
# All defined regions are restricted, we exit here, no need to continue.
report.status = "PASS"
report.status_extended = f"SCP policy {policy.id} restricting all configured regions found"
report.status_extended = f"SCP policy {policy.id} restricting all configured regions found."
findings.append(report)
return findings
else:
# Regions are restricted, but not the ones defined, we keep this finding, but we continue analyzing:
is_region_restricted_statement = True
report.status = "FAIL"
report.status_extended = f"SCP policies exist {policy.id} restricting some AWS Regions, but not all the configured ones, please check config..."
report.status_extended = f"SCP policies exist {policy.id} restricting some AWS Regions, but not all the configured ones, please check config."
# Allow if Condition = {"StringEquals": {"aws:RequestedRegion": [region1, region2]}}
if (

@@ -81,23 +81,23 @@ class organizations_scp_check_deny_regions(Check):
):
# All defined regions are restricted, we exit here, no need to continue.
report.status = "PASS"
report.status_extended = f"SCP policy {policy.id} restricting all configured regions found"
report.status_extended = f"SCP policy {policy.id} restricting all configured regions found."
findings.append(report)
return findings
else:
# Regions are restricted, but not the ones defined, we keep this finding, but we continue analyzing:
is_region_restricted_statement = True
report.status = "FAIL"
report.status_extended = f"SCP policies exist {policy.id} restricting some AWS Regions, but not all the configured ones, please check config..."
report.status_extended = f"SCP policies exist {policy.id} restricting some AWS Regions, but not all the configured ones, please check config."
if not is_region_restricted_statement:
report.status = "FAIL"
report.status_extended = f"SCP policies exist at the organization {org.id} level but don't restrict AWS Regions"
report.status_extended = f"SCP policies exist at the organization {org.id} level but don't restrict AWS Regions."
else:
report.status = "FAIL"
report.status_extended = (
"AWS Organizations is not in-use for this AWS Account"
"AWS Organizations is not in-use for this AWS Account."
)
findings.append(report)
@@ -15,7 +15,7 @@ class organizations_tags_policies_enabled_and_attached(Check):
report.region = organizations_client.region
report.status = "FAIL"
report.status_extended = (
"AWS Organizations is not in-use for this AWS Account"
"AWS Organizations is not in-use for this AWS Account."
)
if org.status == "ACTIVE":
if org.policies is None:
@@ -26,11 +26,11 @@ class organizations_tags_policies_enabled_and_attached(Check):
if policy.type != "TAG_POLICY":
continue

report.status_extended = f"AWS Organization {org.id} has tag policies enabled but not attached"
report.status_extended = f"AWS Organization {org.id} has tag policies enabled but not attached."

if policy.targets:
report.status = "PASS"
report.status_extended = f"AWS Organization {org.id} has tag policies enabled and attached to an AWS account"
report.status_extended = f"AWS Organization {org.id} has tag policies enabled and attached to an AWS account."

findings.append(report)
@@ -13,12 +13,12 @@ class redshift_cluster_audit_logging(Check):
report.resource_tags = cluster.tags
report.status = "PASS"
report.status_extended = (
f"Redshift Cluster {cluster.arn} has audit logging enabled"
f"Redshift Cluster {cluster.arn} has audit logging enabled."
)
if not cluster.logging_enabled:
report.status = "FAIL"
report.status_extended = (
f"Redshift Cluster {cluster.arn} has audit logging disabled"
f"Redshift Cluster {cluster.arn} has audit logging disabled."
)

findings.append(report)

@@ -13,12 +13,12 @@ class redshift_cluster_automated_snapshot(Check):
report.resource_tags = cluster.tags
report.status = "PASS"
report.status_extended = (
f"Redshift Cluster {cluster.arn} has automated snapshots"
f"Redshift Cluster {cluster.arn} has automated snapshots."
)
if not cluster.cluster_snapshots:
report.status = "FAIL"
report.status_extended = (
f"Redshift Cluster {cluster.arn} has automated snapshots disabled"
f"Redshift Cluster {cluster.arn} has automated snapshots disabled."
)

findings.append(report)

@@ -13,12 +13,12 @@ class redshift_cluster_automatic_upgrades(Check):
report.resource_tags = cluster.tags
report.status = "PASS"
report.status_extended = (
f"Redshift Cluster {cluster.arn} has AllowVersionUpgrade enabled"
f"Redshift Cluster {cluster.arn} has AllowVersionUpgrade enabled."
)
if not cluster.allow_version_upgrade:
report.status = "FAIL"
report.status_extended = (
f"Redshift Cluster {cluster.arn} has AllowVersionUpgrade disabled"
f"Redshift Cluster {cluster.arn} has AllowVersionUpgrade disabled."
)

findings.append(report)

@@ -13,11 +13,11 @@ class redshift_cluster_public_access(Check):
report.resource_tags = cluster.tags
report.status = "PASS"
report.status_extended = (
f"Redshift Cluster {cluster.arn} is not publicly accessible"
f"Redshift Cluster {cluster.arn} is not publicly accessible."
)
if cluster.endpoint_address and cluster.public_access:
report.status = "FAIL"
report.status_extended = f"Redshift Cluster {cluster.arn} is publicly accessible at endpoint {cluster.endpoint_address}"
report.status_extended = f"Redshift Cluster {cluster.arn} is publicly accessible at endpoint {cluster.endpoint_address}."

findings.append(report)
@@ -9,7 +9,7 @@ class resourceexplorer2_indexes_found(Check):
findings = []
report = Check_Report_AWS(self.metadata())
report.status = "FAIL"
report.status_extended = "No Resource Explorer Indexes found"
report.status_extended = "No Resource Explorer Indexes found."
report.region = resource_explorer_2_client.region
report.resource_arn = "NoResourceExplorer"
report.resource_id = resource_explorer_2_client.audited_account
@@ -18,7 +18,7 @@ class resourceexplorer2_indexes_found(Check):
report.region = resource_explorer_2_client.indexes[0].region
report.resource_arn = resource_explorer_2_client.indexes[0].arn
report.status = "PASS"
report.status_extended = f"Resource Explorer Indexes found: {len(resource_explorer_2_client.indexes)}"
report.status_extended = f"Resource Explorer Indexes found: {len(resource_explorer_2_client.indexes)}."
findings.append(report)

return findings
@@ -45,8 +45,7 @@ class route53_dangling_ip_subdomain_takeover(Check):
aws_ip_ranges = awsipranges.get_ranges()
if aws_ip_ranges.get(record):
report.status = "FAIL"
report.status_extended = f"Route53 record {record} in Hosted Zone {route53_client.hosted_zones[record_set.hosted_zone_id].name} is a dangling IP which can lead to a subdomain takeover attack!"

report.status_extended = f"Route53 record {record} in Hosted Zone {route53_client.hosted_zones[record_set.hosted_zone_id].name} is a dangling IP which can lead to a subdomain takeover attack."
findings.append(report)

return findings
@@ -16,12 +16,12 @@ class route53_domains_privacy_protection_enabled(Check):
if domain.admin_privacy:
report.status = "PASS"
report.status_extended = (
f"Contact information is private for the {domain.name} domain"
f"Contact information is private for the {domain.name} domain."
)
else:
report.status = "FAIL"
report.status_extended = (
f"Contact information is public for the {domain.name} domain"
f"Contact information is public for the {domain.name} domain."
)

findings.append(report)

@@ -16,12 +16,12 @@ class route53_domains_transferlock_enabled(Check):
if domain.status_list and "clientTransferProhibited" in domain.status_list:
report.status = "PASS"
report.status_extended = (
f"Transfer Lock is enabled for the {domain.name} domain"
f"Transfer Lock is enabled for the {domain.name} domain."
)
else:
report.status = "FAIL"
report.status_extended = (
f"Transfer Lock is disabled for the {domain.name} domain"
f"Transfer Lock is disabled for the {domain.name} domain."
)

findings.append(report)
@@ -18,11 +18,11 @@ class route53_public_hosted_zones_cloudwatch_logging_enabled(Check):
and hosted_zone.logging_config.cloudwatch_log_group_arn
):
report.status = "PASS"
report.status_extended = f"Route53 Public Hosted Zone {hosted_zone.id} has query logging enabled in Log Group {hosted_zone.logging_config.cloudwatch_log_group_arn}"
report.status_extended = f"Route53 Public Hosted Zone {hosted_zone.id} has query logging enabled in Log Group {hosted_zone.logging_config.cloudwatch_log_group_arn}."

else:
report.status = "FAIL"
report.status_extended = f"Route53 Public Hosted Zone {hosted_zone.id} has query logging disabled"
report.status_extended = f"Route53 Public Hosted Zone {hosted_zone.id} has query logging disabled."

findings.append(report)
@@ -32,7 +32,7 @@ class s3_bucket_policy_public_write_access(Check):
)
):
report.status = "FAIL"
report.status_extended = f"S3 Bucket {bucket.name} allows public write access in the bucket policy.."
report.status_extended = f"S3 Bucket {bucket.name} allows public write access in the bucket policy."

findings.append(report)
return findings
@@ -12,10 +12,10 @@ class sagemaker_models_network_isolation_enabled(Check):
report.resource_arn = model.arn
report.resource_tags = model.tags
report.status = "PASS"
report.status_extended = f"Sagemaker notebook instance {model.name} has network isolation enabled"
report.status_extended = f"Sagemaker notebook instance {model.name} has network isolation enabled."
if not model.network_isolation:
report.status = "FAIL"
report.status_extended = f"Sagemaker notebook instance {model.name} has network isolation disabled"
report.status_extended = f"Sagemaker notebook instance {model.name} has network isolation disabled."

findings.append(report)

@@ -13,11 +13,11 @@ class sagemaker_models_vpc_settings_configured(Check):
report.resource_tags = model.tags
report.status = "PASS"
report.status_extended = (
f"Sagemaker notebook instance {model.name} has VPC settings enabled"
f"Sagemaker notebook instance {model.name} has VPC settings enabled."
)
if not model.vpc_config_subnets:
report.status = "FAIL"
report.status_extended = f"Sagemaker notebook instance {model.name} has VPC settings disabled"
report.status_extended = f"Sagemaker notebook instance {model.name} has VPC settings disabled."

findings.append(report)
@@ -12,10 +12,10 @@ class sagemaker_notebook_instance_encryption_enabled(Check):
report.resource_arn = notebook_instance.arn
report.resource_tags = notebook_instance.tags
report.status = "PASS"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has data encryption enabled"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has data encryption enabled."
if not notebook_instance.kms_key_id:
report.status = "FAIL"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has data encryption disabled"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has data encryption disabled."

findings.append(report)

@@ -12,10 +12,10 @@ class sagemaker_notebook_instance_root_access_disabled(Check):
report.resource_arn = notebook_instance.arn
report.resource_tags = notebook_instance.tags
report.status = "PASS"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has root access disabled"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has root access disabled."
if notebook_instance.root_access:
report.status = "FAIL"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has root access enabled"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has root access enabled."

findings.append(report)

@@ -13,11 +13,11 @@ class sagemaker_notebook_instance_vpc_settings_configured(Check):
report.resource_tags = notebook_instance.tags
report.status = "PASS"
report.status_extended = (
f"Sagemaker notebook instance {notebook_instance.name} is in a VPC"
f"Sagemaker notebook instance {notebook_instance.name} is in a VPC."
)
if not notebook_instance.subnet_id:
report.status = "FAIL"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has VPC settings disabled"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has VPC settings disabled."

findings.append(report)
@@ -12,10 +12,10 @@ class sagemaker_notebook_instance_without_direct_internet_access_configured(Chec
report.resource_arn = notebook_instance.arn
report.resource_tags = notebook_instance.tags
report.status = "PASS"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has direct internet access disabled"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has direct internet access disabled."
if notebook_instance.direct_internet_access:
report.status = "FAIL"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has direct internet access enabled"
report.status_extended = f"Sagemaker notebook instance {notebook_instance.name} has direct internet access enabled."

findings.append(report)
@@ -12,10 +12,10 @@ class sagemaker_training_jobs_intercontainer_encryption_enabled(Check):
report.resource_arn = training_job.arn
report.resource_tags = training_job.tags
report.status = "PASS"
report.status_extended = f"Sagemaker training job {training_job.name} has intercontainer encryption enabled"
report.status_extended = f"Sagemaker training job {training_job.name} has intercontainer encryption enabled."
if not training_job.container_traffic_encryption:
report.status = "FAIL"
report.status_extended = f"Sagemaker training job {training_job.name} has intercontainer encryption disabled"
report.status_extended = f"Sagemaker training job {training_job.name} has intercontainer encryption disabled."

findings.append(report)

@@ -12,10 +12,10 @@ class sagemaker_training_jobs_network_isolation_enabled(Check):
report.resource_arn = training_job.arn
report.resource_tags = training_job.tags
report.status = "PASS"
report.status_extended = f"Sagemaker training job {training_job.name} has network isolation enabled"
report.status_extended = f"Sagemaker training job {training_job.name} has network isolation enabled."
if not training_job.network_isolation:
report.status = "FAIL"
report.status_extended = f"Sagemaker training job {training_job.name} has network isolation disabled"
report.status_extended = f"Sagemaker training job {training_job.name} has network isolation disabled."

findings.append(report)

@@ -12,12 +12,10 @@ class sagemaker_training_jobs_volume_and_output_encryption_enabled(Check):
report.resource_arn = training_job.arn
report.resource_tags = training_job.tags
report.status = "PASS"
report.status_extended = (
f"Sagemaker training job {training_job.name} has KMS encryption enabled"
)
report.status_extended = f"Sagemaker training job {training_job.name} has KMS encryption enabled."
if not training_job.volume_kms_key_id:
report.status = "FAIL"
report.status_extended = f"Sagemaker training job {training_job.name} has KMS encryption disabled"
report.status_extended = f"Sagemaker training job {training_job.name} has KMS encryption disabled."

findings.append(report)

@@ -12,10 +12,10 @@ class sagemaker_training_jobs_vpc_settings_configured(Check):
report.resource_arn = training_job.arn
report.resource_tags = training_job.tags
report.status = "PASS"
report.status_extended = f"Sagemaker training job {training_job.name} has VPC settings for the training job volume and output enabled"
report.status_extended = f"Sagemaker training job {training_job.name} has VPC settings for the training job volume and output enabled."
if not training_job.vpc_config_subnets:
report.status = "FAIL"
report.status_extended = f"Sagemaker training job {training_job.name} has VPC settings for the training job volume and output disabled"
report.status_extended = f"Sagemaker training job {training_job.name} has VPC settings for the training job volume and output disabled."

findings.append(report)
@@ -14,12 +14,12 @@ class shield_advanced_protection_in_associated_elastic_ips(Check):
report.resource_arn = elastic_ip.arn
report.resource_tags = elastic_ip.tags
report.status = "FAIL"
report.status_extended = f"Elastic IP {elastic_ip.allocation_id} is not protected by AWS Shield Advanced"
report.status_extended = f"Elastic IP {elastic_ip.allocation_id} is not protected by AWS Shield Advanced."

for protection in shield_client.protections.values():
if elastic_ip.arn == protection.resource_arn:
report.status = "PASS"
report.status_extended = f"Elastic IP {elastic_ip.allocation_id} is protected by AWS Shield Advanced"
report.status_extended = f"Elastic IP {elastic_ip.allocation_id} is protected by AWS Shield Advanced."
break

findings.append(report)

@@ -15,14 +15,14 @@ class shield_advanced_protection_in_classic_load_balancers(Check):
report.resource_tags = elb.tags
report.status = "FAIL"
report.status_extended = (
f"ELB {elb.name} is not protected by AWS Shield Advanced"
f"ELB {elb.name} is not protected by AWS Shield Advanced."
)

for protection in shield_client.protections.values():
if elb.arn == protection.resource_arn:
report.status = "PASS"
report.status_extended = (
f"ELB {elb.name} is protected by AWS Shield Advanced"
f"ELB {elb.name} is protected by AWS Shield Advanced."
)
break
@@ -16,12 +16,12 @@ class shield_advanced_protection_in_cloudfront_distributions(Check):
report.resource_arn = distribution.arn
report.resource_tags = distribution.tags
report.status = "FAIL"
report.status_extended = f"CloudFront distribution {distribution.id} is not protected by AWS Shield Advanced"
report.status_extended = f"CloudFront distribution {distribution.id} is not protected by AWS Shield Advanced."

for protection in shield_client.protections.values():
if distribution.arn == protection.resource_arn:
report.status = "PASS"
report.status_extended = f"CloudFront distribution {distribution.id} is protected by AWS Shield Advanced"
report.status_extended = f"CloudFront distribution {distribution.id} is protected by AWS Shield Advanced."
break

findings.append(report)

@@ -15,12 +15,12 @@ class shield_advanced_protection_in_global_accelerators(Check):
report.resource_id = accelerator.name
report.resource_arn = accelerator.arn
report.status = "FAIL"
report.status_extended = f"Global Accelerator {accelerator.name} is not protected by AWS Shield Advanced"
report.status_extended = f"Global Accelerator {accelerator.name} is not protected by AWS Shield Advanced."

for protection in shield_client.protections.values():
if accelerator.arn == protection.resource_arn:
report.status = "PASS"
report.status_extended = f"Global Accelerator {accelerator.name} is protected by AWS Shield Advanced"
report.status_extended = f"Global Accelerator {accelerator.name} is protected by AWS Shield Advanced."
break

findings.append(report)

@@ -15,12 +15,12 @@ class shield_advanced_protection_in_internet_facing_load_balancers(Check):
report.resource_arn = elbv2.arn
report.resource_tags = elbv2.tags
report.status = "FAIL"
report.status_extended = f"ELBv2 ALB {elbv2.name} is not protected by AWS Shield Advanced"
report.status_extended = f"ELBv2 ALB {elbv2.name} is not protected by AWS Shield Advanced."

for protection in shield_client.protections.values():
if elbv2.arn == protection.resource_arn:
report.status = "PASS"
report.status_extended = f"ELBv2 ALB {elbv2.name} is protected by AWS Shield Advanced"
report.status_extended = f"ELBv2 ALB {elbv2.name} is protected by AWS Shield Advanced."
break

findings.append(report)
@@ -14,12 +14,12 @@ class shield_advanced_protection_in_route53_hosted_zones(Check):
report.resource_arn = hosted_zone.arn
report.resource_tags = hosted_zone.tags
report.status = "FAIL"
report.status_extended = f"Route53 Hosted Zone {hosted_zone.id} is not protected by AWS Shield Advanced"
report.status_extended = f"Route53 Hosted Zone {hosted_zone.id} is not protected by AWS Shield Advanced."

for protection in shield_client.protections.values():
if hosted_zone.arn == protection.resource_arn:
report.status = "PASS"
report.status_extended = f"Route53 Hosted Zone {hosted_zone.id} is protected by AWS Shield Advanced"
report.status_extended = f"Route53 Hosted Zone {hosted_zone.id} is protected by AWS Shield Advanced."
break

findings.append(report)
@@ -15,7 +15,9 @@ class sns_topics_not_publicly_accessible(Check):
report.resource_arn = topic.arn
report.resource_tags = topic.tags
report.status = "PASS"
report.status_extended = f"SNS topic {topic.name} is not publicly accesible"
report.status_extended = (
f"SNS topic {topic.name} is not publicly accesible."
)
if topic.policy:
for statement in topic.policy["Statement"]:
# Only check allow statements
@@ -37,10 +39,10 @@ class sns_topics_not_publicly_accessible(Check):
statement["Condition"], sns_client.audited_account
)
):
report.status_extended = f"SNS topic {topic.name} is not public because its policy only allows access from the same account"
report.status_extended = f"SNS topic {topic.name} is not public because its policy only allows access from the same account."
else:
report.status = "FAIL"
report.status_extended = f"SNS topic {topic.name} is public because its policy allows public access"
report.status_extended = f"SNS topic {topic.name} is public because its policy allows public access."

findings.append(report)
@@ -15,7 +15,7 @@ class sqs_queues_not_publicly_accessible(Check):
report.resource_arn = queue.arn
report.resource_tags = queue.tags
report.status = "PASS"
report.status_extended = f"SQS queue {queue.id} is not public"
report.status_extended = f"SQS queue {queue.id} is not public."
if queue.policy:
for statement in queue.policy["Statement"]:
# Only check allow statements
@@ -37,10 +37,10 @@ class sqs_queues_not_publicly_accessible(Check):
statement["Condition"], sqs_client.audited_account
)
):
report.status_extended = f"SQS queue {queue.id} is not public because its policy only allows access from the same account"
report.status_extended = f"SQS queue {queue.id} is not public because its policy only allows access from the same account."
else:
report.status = "FAIL"
report.status_extended = f"SQS queue {queue.id} is public because its policy allows public access"
report.status_extended = f"SQS queue {queue.id} is public because its policy allows public access."
findings.append(report)

return findings
@@ -13,12 +13,12 @@ class sqs_queues_server_side_encryption_enabled(Check):
report.resource_tags = queue.tags
report.status = "PASS"
report.status_extended = (
f"SQS queue {queue.id} is using Server Side Encryption"
f"SQS queue {queue.id} is using Server Side Encryption."
)
if not queue.kms_key_id:
report.status = "FAIL"
report.status_extended = (
f"SQS queue {queue.id} is not using Server Side Encryption"
f"SQS queue {queue.id} is not using Server Side Encryption."
)
findings.append(report)
@@ -19,7 +19,9 @@ class ssm_document_secrets(Check):
report.resource_id = document.name
report.resource_tags = document.tags
report.status = "PASS"
report.status_extended = f"No secrets found in SSM Document {document.name}"
report.status_extended = (
f"No secrets found in SSM Document {document.name}."
)

if document.content:
temp_env_data_file = tempfile.NamedTemporaryFile(delete=False)
@@ -43,7 +45,7 @@ class ssm_document_secrets(Check):
]
)
report.status = "FAIL"
report.status_extended = f"Potential secret found in SSM Document {document.name} -> {secrets_string}"
report.status_extended = f"Potential secret found in SSM Document {document.name} -> {secrets_string}."

os.remove(temp_env_data_file.name)
@@ -13,10 +13,10 @@ class ssm_documents_set_as_public(Check):
report.resource_tags = document.tags
if document.account_owners:
report.status = "FAIL"
report.status_extended = f"SSM Document {document.name} is public"
report.status_extended = f"SSM Document {document.name} is public."
else:
report.status = "PASS"
report.status_extended = f"SSM Document {document.name} is not public"
report.status_extended = f"SSM Document {document.name} is not public."

findings.append(report)
@@ -16,7 +16,7 @@ class vpc_different_regions(Check):
report.resource_id = vpc_client.audited_account
report.resource_arn = vpc_client.audited_account_arn
report.status = "FAIL"
report.status_extended = "VPCs found only in one region"
report.status_extended = "VPCs found only in one region."
if len(vpc_regions) > 1:
report.status = "PASS"
report.status_extended = "VPCs found in more than one region."
@@ -15,12 +15,12 @@ class vpc_subnet_no_public_ip_by_default(Check):
if subnet.mapPublicIpOnLaunch:
report.status = "FAIL"
report.status_extended = (
f"VPC subnet {subnet.id} assigns public IP by default"
f"VPC subnet {subnet.id} assigns public IP by default."
)
else:
report.status = "PASS"
report.status_extended = (
f"VPC subnet {subnet.id} does NOT assign public IP by default"
f"VPC subnet {subnet.id} does NOT assign public IP by default."
)
findings.append(report)
@@ -14,10 +14,10 @@ class wellarchitected_workload_no_high_or_medium_risks(Check):
report.resource_arn = workload.arn
report.resource_tags = workload.tags
report.status = "PASS"
report.status_extended = f"Well Architected workload {workload.name} does not contain high or medium risks"
report.status_extended = f"Well Architected workload {workload.name} does not contain high or medium risks."
if "HIGH" in workload.risks or "MEDIUM" in workload.risks:
report.status = "FAIL"
report.status_extended = f"Well Architected workload {workload.name} contains {workload.risks.get('HIGH',0)} high and {workload.risks.get('MEDIUM',0)} medium risks"
report.status_extended = f"Well Architected workload {workload.name} contains {workload.risks.get('HIGH',0)} high and {workload.risks.get('MEDIUM',0)} medium risks."

findings.append(report)
return findings
@@ -14,23 +14,19 @@ class workspaces_volume_encryption_enabled(Check):
report.resource_arn = workspace.arn
report.resource_tags = workspace.tags
report.status = "PASS"
report.status_extended = f"WorkSpaces workspace {workspace.id} without root or user unencrypted volumes"
report.status_extended = f"WorkSpaces workspace {workspace.id} without root or user unencrypted volumes."
if not workspace.user_volume_encryption_enabled:
report.status = "FAIL"
report.status_extended = (
f"WorkSpaces workspace {workspace.id} with user unencrypted volumes"
)
report.status_extended = f"WorkSpaces workspace {workspace.id} with user unencrypted volumes."
if not workspace.root_volume_encryption_enabled:
report.status = "FAIL"
report.status_extended = (
f"WorkSpaces workspace {workspace.id} with root unencrypted volumes"
)
report.status_extended = f"WorkSpaces workspace {workspace.id} with root unencrypted volumes."
if (
not workspace.root_volume_encryption_enabled
and not workspace.user_volume_encryption_enabled
):
report.status = "FAIL"
report.status_extended = f"WorkSpaces workspace {workspace.id} with root and user unencrypted volumes"
report.status_extended = f"WorkSpaces workspace {workspace.id} with root and user unencrypted volumes."

findings.append(report)
return findings