fix(services): solve errors in EMR, RDS, S3 and VPC services (#1913)

Sergio Garcia
2023-02-21 11:11:39 +01:00
committed by GitHub
parent 640d1bd176
commit f913536d88
7 changed files with 132 additions and 165 deletions

View File

@@ -110,16 +110,19 @@ class EMR:
             )
             # Save MasterPublicDnsName
-            master_public_dns_name = cluster_info["Cluster"][
+            master_public_dns_name = cluster_info["Cluster"].get(
                 "MasterPublicDnsName"
-            ]
+            )
             self.clusters[
                 cluster.id
             ].master_public_dns_name = master_public_dns_name
             # Set cluster Public/Private
             # Public EMR cluster have their DNS ending with .amazonaws.com
             # while private ones have format of ip-xxx-xx-xx.us-east-1.compute.internal.
-            if ".amazonaws.com" in master_public_dns_name:
+            if (
+                master_public_dns_name
+                and ".amazonaws.com" in master_public_dns_name
+            ):
                 self.clusters[cluster.id].public = True
         except Exception as error:
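Why this matters: DescribeCluster omits MasterPublicDnsName for some cluster states, so the old subscript raised KeyError, and an `in` test against None raises TypeError. A minimal sketch of the new guard, using a hypothetical response dict rather than a live API call:

# Hypothetical DescribeCluster response with MasterPublicDnsName absent
cluster_info = {"Cluster": {"Id": "j-ABC123"}}

# .get() yields None instead of raising KeyError for the missing key
master_public_dns_name = cluster_info["Cluster"].get("MasterPublicDnsName")

# Checking for None first avoids TypeError: argument of type 'NoneType' is not iterable
is_public = bool(master_public_dns_name and ".amazonaws.com" in master_public_dns_name)
print(is_public)  # False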

View File

@@ -54,7 +54,7 @@ class RDS:
                     self.db_instances.append(
                         DBInstance(
                             id=instance["DBInstanceIdentifier"],
-                            endpoint=instance["Endpoint"]["Address"],
+                            endpoint=instance.get("Endpoint"),
                             engine=instance["Engine"],
                             status=instance["DBInstanceStatus"],
                             public=instance["PubliclyAccessible"],
@@ -171,7 +171,7 @@ class RDS:
 class DBInstance(BaseModel):
     id: str
-    endpoint: str
+    endpoint: Optional[dict]
     engine: str
     status: str
     public: bool
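For context: DescribeDBInstances returns no Endpoint while an instance is still creating, so `instance["Endpoint"]["Address"]` raised KeyError. The model now stores the whole endpoint dict as optional, and callers index into it once it exists, as the updated test later in this commit does. A trimmed-down sketch of the new field (pydantic model, field names as above; the sample dicts are hypothetical):

from typing import Optional

from pydantic import BaseModel


class DBInstance(BaseModel):  # trimmed to the fields relevant here
    id: str
    endpoint: Optional[dict]


# Still creating: no Endpoint key yet, so .get() hands the model None
creating = {"DBInstanceIdentifier": "db-master-1"}
db = DBInstance(id=creating["DBInstanceIdentifier"], endpoint=creating.get("Endpoint"))
assert db.endpoint is None

# Once available, callers dereference the dict themselves
available = {
    "DBInstanceIdentifier": "db-master-1",
    "Endpoint": {"Address": "db-master-1.example.rds.amazonaws.com", "Port": 5432},
}
db = DBInstance(id=available["DBInstanceIdentifier"], endpoint=available.get("Endpoint"))
assert db.endpoint["Address"].endswith(".rds.amazonaws.com")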

View File

@@ -44,35 +44,33 @@ class S3:
         try:
             list_buckets = self.client.list_buckets()
             for bucket in list_buckets["Buckets"]:
-                try:
-                    bucket_region = self.client.get_bucket_location(
-                        Bucket=bucket["Name"]
-                    )["LocationConstraint"]
-                    if bucket_region == "EU":  # If EU, bucket_region is eu-west-1
-                        bucket_region = "eu-west-1"
-                    if not bucket_region:  # If Nonce, bucket_region is us-east-1
-                        bucket_region = "us-east-1"
-                    # Arn
-                    arn = f"arn:{self.audited_partition}:s3:::{bucket['Name']}"
-                    if not self.audit_resources or (
-                        is_resource_filtered(arn, self.audit_resources)
-                    ):
-                        # Check if there are filter regions
-                        if audit_info.audited_regions:
-                            if bucket_region in audit_info.audited_regions:
-                                buckets.append(
-                                    Bucket(bucket["Name"], arn, bucket_region)
-                                )
-                        else:
-                            buckets.append(Bucket(bucket["Name"], arn, bucket_region))
-                except Exception as error:
-                    logger.error(
-                        f"{bucket} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
-                    )
+                bucket_region = self.client.get_bucket_location(Bucket=bucket["Name"])[
+                    "LocationConstraint"
+                ]
+                if bucket_region == "EU":  # If EU, bucket_region is eu-west-1
+                    bucket_region = "eu-west-1"
+                if not bucket_region:  # If Nonce, bucket_region is us-east-1
+                    bucket_region = "us-east-1"
+                # Arn
+                arn = f"arn:{self.audited_partition}:s3:::{bucket['Name']}"
+                if not self.audit_resources or (
+                    is_resource_filtered(arn, self.audit_resources)
+                ):
+                    # Check if there are filter regions
+                    if audit_info.audited_regions:
+                        if bucket_region in audit_info.audited_regions:
+                            buckets.append(Bucket(bucket["Name"], arn, bucket_region))
+                    else:
+                        buckets.append(Bucket(bucket["Name"], arn, bucket_region))
         except Exception as error:
-            logger.error(
-                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
-            )
+            if bucket:
+                logger.error(
+                    f"{bucket} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
+            else:
+                logger.error(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
         return buckets

     def __get_bucket_versioning__(self, bucket):
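The restructuring above removes the per-bucket try/except: a single outer handler now names the failing bucket when the loop variable is bound and falls back to a bare message otherwise. A self-contained sketch of that flow with a simulated failure; the up-front `bucket = None` binding is added here (not part of the commit) so the `if bucket:` check in the handler can never hit an unbound name:

import logging

logger = logging.getLogger(__name__)


def collect(bucket_names):
    buckets = []
    bucket = None  # pre-bound so the except branch can test it safely
    try:
        for bucket in bucket_names:
            if bucket == "broken-bucket":
                raise RuntimeError("simulated get_bucket_location failure")
            buckets.append(bucket)
    except Exception as error:
        if bucket:  # we know which bucket failed, include it in the log
            logger.error(f"{bucket} -- {error.__class__.__name__}: {error}")
        else:  # failure happened before the loop produced a bucket
            logger.error(f"{error.__class__.__name__}: {error}")
    return buckets


print(collect(["logs", "broken-bucket", "assets"]))  # ['logs']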
@@ -89,9 +87,14 @@ class S3:
                     if "Enabled" == bucket_versioning["MFADelete"]:
                         bucket.mfa_delete = True
             except Exception as error:
-                logger.error(
-                    f"{bucket.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
-                )
+                if bucket.region:
+                    logger.error(
+                        f"{bucket.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )
+                else:
+                    logger.error(
+                        f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )

     def __get_bucket_encryption__(self, bucket):
         logger.info("S3 - Get buckets encryption...")
@@ -107,10 +110,14 @@ class S3:
         except Exception as error:
             if "ServerSideEncryptionConfigurationNotFoundError" in str(error):
                 bucket.encryption = None
-            else:
+            elif regional_client:
                 logger.error(
                     f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                 )
+            else:
+                logger.error(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )

     def __get_bucket_logging__(self, bucket):
         logger.info("S3 - Get buckets logging...")
@@ -123,9 +130,14 @@ class S3:
                     "TargetBucket"
                 ]
         except Exception as error:
-            logger.error(
-                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
-            )
+            if regional_client:
+                logger.error(
+                    f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
+            else:
+                logger.error(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )

     def __get_public_access_block__(self, bucket):
         logger.info("S3 - Get buckets public access block...")
@@ -148,9 +160,14 @@ class S3:
                     }
                 )
             else:
-                logger.error(
-                    f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
-                )
+                if regional_client:
+                    logger.error(
+                        f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )
+                else:
+                    logger.error(
+                        f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )

     def __get_bucket_acl__(self, bucket):
         logger.info("S3 - Get buckets acl...")
@@ -171,9 +188,14 @@ class S3:
                     grantees.append(grantee)
             bucket.acl_grantees = grantees
         except Exception as error:
-            logger.error(
-                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
-            )
+            if regional_client:
+                logger.error(
+                    f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
+            else:
+                logger.error(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )

     def __get_bucket_policy__(self, bucket):
         logger.info("S3 - Get buckets policy...")
@@ -186,9 +208,14 @@ class S3:
             if "NoSuchBucketPolicy" in str(error):
                 bucket.policy = {}
             else:
-                logger.error(
-                    f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
-                )
+                if regional_client:
+                    logger.error(
+                        f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )
+                else:
+                    logger.error(
+                        f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )

     def __get_bucket_ownership_controls__(self, bucket):
         logger.info("S3 - Get buckets ownership controls...")
@@ -201,9 +228,14 @@ class S3:
             if "OwnershipControlsNotFoundError" in str(error):
                 bucket.ownership = None
             else:
-                logger.error(
-                    f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
-                )
+                if regional_client:
+                    logger.error(
+                        f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )
+                else:
+                    logger.error(
+                        f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )

     ################## S3Control
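All seven S3 helpers above repeat the same guard: prefix the error log with region (or bucket) context only when that context actually exists. A hypothetical helper condensing the pattern; `log_error` and its signature are this sketch's invention, not part of the commit:

import logging
from typing import Optional

logger = logging.getLogger(__name__)


def log_error(error: Exception, context: Optional[str] = None) -> None:
    """Hypothetical helper: log with a 'context --' prefix only when present."""
    detail = f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
    logger.error(f"{context} -- {detail}" if context else detail)


try:
    raise ValueError("boom")
except Exception as error:
    log_error(error)                       # no regional client resolved yet
    log_error(error, context="eu-west-1")  # regional context available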

View File

@@ -1,6 +1,8 @@
 import json
 import threading
-from dataclasses import dataclass
+from typing import Optional
+
+from pydantic import BaseModel

 from prowler.lib.logger import logger
 from prowler.lib.scan_filters.scan_filters import is_resource_filtered
@@ -50,10 +52,10 @@ class VPC:
                 ):
                     self.vpcs.append(
                         VPCs(
-                            vpc["VpcId"],
-                            vpc["IsDefault"],
-                            vpc["CidrBlock"],
-                            regional_client.region,
+                            id=vpc["VpcId"],
+                            default=vpc["IsDefault"],
+                            cidr_block=vpc["CidrBlock"],
+                            region=regional_client.region,
                         )
                     )
         except Exception as error:
@@ -74,14 +76,17 @@ class VPC:
                         conn["VpcPeeringConnectionId"], self.audit_resources
                     )
                 ):
+                    conn["AccepterVpcInfo"]["CidrBlock"] = None
                     self.vpc_peering_connections.append(
                         VpcPeeringConnection(
-                            conn["VpcPeeringConnectionId"],
-                            conn["AccepterVpcInfo"]["VpcId"],
-                            conn["AccepterVpcInfo"]["CidrBlock"],
-                            conn["RequesterVpcInfo"]["VpcId"],
-                            conn["RequesterVpcInfo"]["CidrBlock"],
-                            regional_client.region,
+                            id=conn["VpcPeeringConnectionId"],
+                            accepter_vpc=conn["AccepterVpcInfo"]["VpcId"],
+                            accepter_cidr=conn["AccepterVpcInfo"].get("CidrBlock"),
+                            requester_vpc=conn["RequesterVpcInfo"]["VpcId"],
+                            requester_cidr=conn["RequesterVpcInfo"].get(
+                                "CidrBlock"
+                            ),
+                            region=regional_client.region,
                         )
                     )
         except Exception as error:
@@ -113,8 +118,8 @@ class VPC:
                             destination_cidrs.append(route["DestinationCidrBlock"])
                     conn.route_tables.append(
                         Route(
-                            route_table["RouteTableId"],
-                            destination_cidrs,
+                            id=route_table["RouteTableId"],
+                            destination_cidrs=destination_cidrs,
                         )
                     )
         except Exception as error:
@@ -160,12 +165,12 @@ class VPC:
                         endpoint_policy = json.loads(endpoint["PolicyDocument"])
                         self.vpc_endpoints.append(
                             VpcEndpoint(
-                                endpoint["VpcEndpointId"],
-                                endpoint["VpcId"],
-                                endpoint["State"],
-                                endpoint_policy,
-                                endpoint["OwnerId"],
-                                regional_client.region,
+                                id=endpoint["VpcEndpointId"],
+                                vpc_id=endpoint["VpcId"],
+                                state=endpoint["State"],
+                                policy_document=endpoint_policy,
+                                owner_id=endpoint["OwnerId"],
+                                region=regional_client.region,
                             )
                         )
         except Exception as error:
@@ -189,10 +194,10 @@ class VPC:
                 ):
                     self.vpc_endpoint_services.append(
                         VpcEndpointService(
-                            endpoint["ServiceId"],
-                            endpoint["ServiceName"],
-                            endpoint["Owner"],
-                            regional_client.region,
+                            id=endpoint["ServiceId"],
+                            service=endpoint["ServiceName"],
+                            owner_id=endpoint["Owner"],
+                            region=regional_client.region,
                        )
                    )
         except Exception as error:
@@ -217,114 +222,41 @@ class VPC:
             logger.error(f"{error.__class__.__name__}: {error}")


-@dataclass
-class VPCs:
+class VPCs(BaseModel):
     id: str
     default: bool
     cidr_block: str
-    flow_log: bool
+    flow_log: bool = False
     region: str

-    def __init__(
-        self,
-        id,
-        default,
-        cidr_block,
-        region,
-    ):
-        self.id = id
-        self.default = default
-        self.cidr_block = cidr_block
-        self.flow_log = False
-        self.region = region
-
-
-@dataclass
-class Route:
+
+class Route(BaseModel):
     id: str
     destination_cidrs: list[str]

-    def __init__(
-        self,
-        id,
-        destination_cidrs,
-    ):
-        self.id = id
-        self.destination_cidrs = destination_cidrs
-
-
-@dataclass
-class VpcPeeringConnection:
+
+class VpcPeeringConnection(BaseModel):
     id: str
     accepter_vpc: str
-    accepter_cidr: str
+    accepter_cidr: Optional[str]
     requester_vpc: str
-    requester_cidr: str
-    route_tables: list[Route]
+    requester_cidr: Optional[str]
+    route_tables: list[Route] = []
     region: str

-    def __init__(
-        self,
-        id,
-        accepter_vpc,
-        accepter_cidr,
-        requester_vpc,
-        requester_cidr,
-        region,
-    ):
-        self.id = id
-        self.accepter_vpc = accepter_vpc
-        self.accepter_cidr = accepter_cidr
-        self.requester_vpc = requester_vpc
-        self.requester_cidr = requester_cidr
-        self.route_tables = []
-        self.region = region
-
-
-@dataclass
-class VpcEndpoint:
+
+class VpcEndpoint(BaseModel):
     id: str
     vpc_id: str
     state: str
     policy_document: dict
-    owner_id: list[Route]
+    owner_id: str
     region: str

-    def __init__(
-        self,
-        id,
-        vpc_id,
-        state,
-        policy_document,
-        owner_id,
-        region,
-    ):
-        self.id = id
-        self.vpc_id = vpc_id
-        self.state = state
-        self.policy_document = policy_document
-        self.owner_id = owner_id
-        self.route_tables = []
-        self.region = region
-
-
-@dataclass
-class VpcEndpointService:
+
+class VpcEndpointService(BaseModel):
     id: str
     service: str
     owner_id: str
-    allowed_principals: list
+    allowed_principals: list = []
     region: str
-
-    def __init__(
-        self,
-        id,
-        service,
-        owner_id,
-        region,
-    ):
-        self.id = id
-        self.service = service
-        self.owner_id = owner_id
-        self.allowed_principals = []
-        self.region = region
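The model rewrite above trades @dataclass plus hand-written __init__ methods for pydantic BaseModel: construction becomes keyword-based at the call sites shown earlier, the CIDR fields accept None, and pydantic copies mutable defaults such as `route_tables: list[Route] = []` per instance. A short sketch of those three properties (pydantic v1 style, matching the models above; the sample values are hypothetical):

from typing import Optional

from pydantic import BaseModel


class Route(BaseModel):
    id: str
    destination_cidrs: list[str]


class VpcPeeringConnection(BaseModel):
    id: str
    accepter_vpc: str
    accepter_cidr: Optional[str]  # None when the API omits CidrBlock
    requester_vpc: str
    requester_cidr: Optional[str]
    route_tables: list[Route] = []  # pydantic copies this default per instance
    region: str


a = VpcPeeringConnection(
    id="pcx-1", accepter_vpc="vpc-a", accepter_cidr=None,
    requester_vpc="vpc-b", requester_cidr="10.0.0.0/16", region="us-east-1",
)
b = VpcPeeringConnection(
    id="pcx-2", accepter_vpc="vpc-c", accepter_cidr="10.1.0.0/16",
    requester_vpc="vpc-d", requester_cidr=None, region="eu-west-1",
)
a.route_tables.append(Route(id="rtb-1", destination_cidrs=["10.0.0.0/16"]))
assert b.route_tables == []  # a's append did not leak into b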

View File

@@ -90,7 +90,7 @@ class Test_RDS_Service:
         assert rds.db_instances[0].id == "db-master-1"
         assert rds.db_instances[0].region == AWS_REGION
         assert (
-            rds.db_instances[0].endpoint
+            rds.db_instances[0].endpoint["Address"]
             == "db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com"
         )
         assert rds.db_instances[0].status == "available"

View File

@@ -75,8 +75,8 @@ class Test_vpc_peering_routing_tables_with_least_privilege:
             service_client.vpc_peering_connections[0].route_tables = [
                 Route(
-                    main_route_table_id,
-                    ["10.12.23.44/32"],
+                    id=main_route_table_id,
+                    destination_cidrs=["10.12.23.44/32"],
                 )
             ]
             check = vpc_peering_routing_tables_with_least_privilege()
@@ -138,8 +138,8 @@ class Test_vpc_peering_routing_tables_with_least_privilege:
             service_client.vpc_peering_connections[0].route_tables = [
                 Route(
-                    main_route_table_id,
-                    ["10.0.0.0/16"],
+                    id=main_route_table_id,
+                    destination_cidrs=["10.0.0.0/16"],
                 )
             ]
             check = vpc_peering_routing_tables_with_least_privilege()

View File

@@ -163,8 +163,8 @@ class Test_VPC_Service:
         vpc = VPC(audit_info)
         vpc.vpc_peering_connections[0].route_tables = [
             Route(
-                main_route_table_id,
-                ["10.0.0.4/24"],
+                id=main_route_table_id,
+                destination_cidrs=["10.0.0.4/24"],
             )
         ]
         assert len(vpc.vpc_peering_connections[0].route_tables) == 1