feat(): redshift service and checks (#1497)

Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
Nacho Rivera
2022-11-17 20:50:30 +01:00
committed by GitHub
parent 3370475fe9
commit 025b0547cd
24 changed files with 870 additions and 185 deletions

View File

@@ -1,44 +0,0 @@
#!/usr/bin/env bash
# Prowler - the handy cloud security tool (copyright 2018) by Toni de la Fuente
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
CHECK_ID_extra711="7.11"
CHECK_TITLE_extra711="[extra711] Check for Publicly Accessible Redshift Clusters"
CHECK_SCORED_extra711="NOT_SCORED"
CHECK_CIS_LEVEL_extra711="EXTRA"
CHECK_SEVERITY_extra711="High"
CHECK_ASFF_RESOURCE_TYPE_extra711="AwsRedshiftCluster"
CHECK_ALTERNATE_check711="extra711"
CHECK_SERVICENAME_extra711="redshift"
CHECK_RISK_extra711='Publicly accessible services could expose sensitive data to bad actors.'
CHECK_REMEDIATION_extra711='List all shared Redshift clusters and make sure there is a business reason for them.'
CHECK_DOC_extra711='https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html'
CHECK_CAF_EPIC_extra711='Data Protection'
extra711(){
# "Check for Publicly Accessible Redshift Clusters "
for regx in $REGIONS; do
LIST_OF_PUBLIC_REDSHIFT_CLUSTERS=$($AWSCLI redshift describe-clusters $PROFILE_OPT --region $regx --query 'Clusters[?PubliclyAccessible == `true`].[ClusterIdentifier,Endpoint.Address]' --output text 2>&1)
if [[ $(echo "$LIST_OF_PUBLIC_REDSHIFT_CLUSTERS" | grep -E 'AccessDenied|UnauthorizedOperation') ]]; then
textInfo "$regx: Access Denied trying to describe clusters" "$regx"
continue
fi
if [[ $LIST_OF_PUBLIC_REDSHIFT_CLUSTERS ]];then
while read -r cluster;do
CLUSTER_ID=$(echo $cluster | awk '{ print $1; }')
CLUSTER_ENDPOINT=$(echo $cluster | awk '{ print $2; }')
textFail "$regx: Cluster: $CLUSTER_ID at Endpoint: $CLUSTER_ENDPOINT is publicly accessible!" "$regx" "$CLUSTER_ID"
done <<< "$LIST_OF_PUBLIC_REDSHIFT_CLUSTERS"
else
textPass "$regx: no Publicly Accessible Redshift Clusters found" "$regx" "$CLUSTER_ID"
fi
done
}

View File

@@ -1,47 +0,0 @@
#!/usr/bin/env bash
# Prowler - the handy cloud security tool (copyright 2018) by Toni de la Fuente
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
CHECK_ID_extra7149="7.149"
CHECK_TITLE_extra7149="[extra7149] Check if Redshift Clusters have automated snapshots enabled"
CHECK_SCORED_extra7149="NOT_SCORED"
CHECK_CIS_LEVEL_extra7149="EXTRA"
CHECK_SEVERITY_extra7149="Medium"
CHECK_ASFF_RESOURCE_TYPE_extra7149="AwsRedshiftCluster"
CHECK_ALTERNATE_check7149="extra7149"
CHECK_SERVICENAME_extra7149="redshift"
CHECK_RISK_extra7149='If backup is not enabled; data is vulnerable. Human error or bad actors could erase or modify data.'
CHECK_REMEDIATION_extra7149='Enable automated backup for production data. Define a retention period and periodically test backup restoration. A Disaster Recovery process should be in place to govern Data Protection approach.'
CHECK_DOC_extra7149='https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/AWS_Redshift.html'
CHECK_CAF_EPIC_extra7149='Data Protection'
extra7149() {
# "Check if Redshift cluster has audit logging enabled "
for regx in $REGIONS; do
LIST_OF_REDSHIFT_CLUSTERS=$($AWSCLI redshift describe-clusters $PROFILE_OPT --region $regx --query 'Clusters[*].ClusterIdentifier' --output text 2>&1)
if [[ $(echo "$LIST_OF_REDSHIFT_CLUSTERS" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError') ]]; then
textInfo "$regx: Access Denied trying to describe clusters" "$regx"
continue
fi
if [[ $LIST_OF_REDSHIFT_CLUSTERS ]]; then
for redshiftcluster in $LIST_OF_REDSHIFT_CLUSTERS; do
REDSHIFT_SNAPSHOT_ENABLED=$($AWSCLI redshift describe-cluster-snapshots $PROFILE_OPT --region $regx --cluster-identifier $redshiftcluster --snapshot-type automated)
if [[ $REDSHIFT_SNAPSHOT_ENABLED ]]; then
textPass "$regx: Redshift cluster $redshiftcluster has automated snapshots." "$regx" "$redshiftcluster"
else
textFail "$regx: Redshift cluster $redshiftcluster has automated snapshots disabled!" "$regx" "$redshiftcluster"
fi
done
else
textInfo "$regx: No Redshift cluster configured" "$regx"
fi
done
}

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env bash
# Prowler - the handy cloud security tool (copyright 2021) by Toni de la Fuente
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
CHECK_ID_extra7160="7.160"
CHECK_TITLE_extra7160="[extra7160] Check if Redshift has automatic upgrades enabled"
CHECK_SCORED_extra7160="NOT_SCORED"
CHECK_CIS_LEVEL_extra7160="EXTRA"
CHECK_SEVERITY_extra7160="Medium"
CHECK_ASFF_RESOURCE_TYPE_extra7160="AwsRedshift"
CHECK_ALTERNATE_check7160="extra7160"
CHECK_SERVICENAME_extra7160="redshift"
CHECK_RISK_extra7160='Without automatic version upgrade enabled; a critical Redshift Cluster version can become severely out of date.'
CHECK_REMEDIATION_extra7160='Enable AutomaticVersionUpgrade on Redshift Cluster'
CHECK_DOC_extra7160='https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html'
CHECK_CAF_EPIC_extra7160='Infrastructure Security'
extra7160(){
for regx in $REGIONS; do
LIST_OF_CLUSTERS=$($AWSCLI redshift describe-clusters $PROFILE_OPT --query 'Clusters[*].ClusterIdentifier' --region $regx --output text 2>&1)
if [[ $(echo "$LIST_OF_CLUSTERS" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError') ]]; then
textInfo "$regx: Access Denied trying to describe clusters" "$regx"
continue
fi
if [[ $LIST_OF_CLUSTERS ]]; then
for cluster in $LIST_OF_CLUSTERS; do
AUTO_UPGRADE_ENABLED=$($AWSCLI redshift describe-clusters $PROFILE_OPT --cluster-identifier $cluster --query 'Clusters[*].AllowVersionUpgrade' --region $regx --output text)
if [[ $AUTO_UPGRADE_ENABLED == "True" ]]; then
textPass "$regx: $cluster has AllowVersionUpgrade enabled" "$regx" "$cluster"
else
textFail "$regx: $cluster has AllowVersionUpgrade disabled" "$regx" "$cluster"
fi
done
else
textInfo "$regx: No Redshift Clusters found" "$regx"
fi
done
}

View File

@@ -1,48 +0,0 @@
#!/usr/bin/env bash
# Prowler - the handy cloud security tool (copyright 2018) by Toni de la Fuente
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
CHECK_ID_extra721="7.21"
CHECK_TITLE_extra721="[extra721] Check if Redshift cluster has audit logging enabled"
CHECK_SCORED_extra721="NOT_SCORED"
CHECK_CIS_LEVEL_extra721="EXTRA"
CHECK_SEVERITY_extra721="Medium"
CHECK_ASFF_RESOURCE_TYPE_extra721="AwsRedshiftCluster"
CHECK_ALTERNATE_check721="extra721"
CHECK_SERVICENAME_extra721="redshift"
CHECK_RISK_extra721='If logs are not enabled; monitoring of service use and threat analysis is not possible.'
CHECK_REMEDIATION_extra721='Enable logs. Create an S3 lifecycle policy. Define use cases; metrics and automated responses where applicable.'
CHECK_DOC_extra721='https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html'
CHECK_CAF_EPIC_extra721='Logging and Monitoring'
extra721(){
# "Check if Redshift cluster has audit logging enabled "
for regx in $REGIONS; do
LIST_OF_REDSHIFT_CLUSTERS=$($AWSCLI redshift describe-clusters $PROFILE_OPT --region $regx --query 'Clusters[*].ClusterIdentifier' --output text 2>&1)
if [[ $(echo "$LIST_OF_REDSHIFT_CLUSTERS" | grep -E 'AccessDenied|UnauthorizedOperation') ]]; then
textInfo "$regx: Access Denied trying to describe clusters" "$regx"
continue
fi
if [[ $LIST_OF_REDSHIFT_CLUSTERS ]]; then
for redshiftcluster in $LIST_OF_REDSHIFT_CLUSTERS;do
REDSHIFT_LOG_ENABLED=$($AWSCLI redshift describe-logging-status $PROFILE_OPT --region $regx --cluster-identifier $redshiftcluster --query LoggingEnabled --output text | grep True)
if [[ $REDSHIFT_LOG_ENABLED ]];then
REDSHIFT_LOG_ENABLED_BUCKET=$($AWSCLI redshift describe-logging-status $PROFILE_OPT --region $regx --cluster-identifier $redshiftcluster --query BucketName --output text)
textPass "$regx: Redshift cluster $redshiftcluster has audit logging enabled to bucket $REDSHIFT_LOG_ENABLED_BUCKET" "$regx" "$redshiftcluster"
else
textFail "$regx: Redshift cluster $redshiftcluster logging disabled!" "$regx" "$redshiftcluster"
fi
done
else
textInfo "$regx: No Redshift cluster configured" "$regx"
fi
done
}

View File

@@ -0,0 +1,4 @@
from providers.aws.lib.audit_info.audit_info import current_audit_info
from providers.aws.services.redshift.redshift_service import Redshift
redshift_client = Redshift(current_audit_info)
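The client module above instantiates the Redshift service exactly once against current_audit_info, so every Redshift check reuses the same scan instead of calling the API again. A minimal sketch of that consumption pattern (illustrative only; the real checks below follow the same shape through Check_Report):

from providers.aws.services.redshift.redshift_client import redshift_client

# The shared instance already holds one Cluster entry per cluster found in every audited region.
for cluster in redshift_client.clusters:
    print(cluster.region, cluster.id, cluster.public_access)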

View File

@@ -0,0 +1,35 @@
{
"Provider": "aws",
"CheckID": "redshift_cluster_audit_logging",
"CheckTitle": "Check if Redshift cluster has audit logging enabled",
"CheckType": [],
"ServiceName": "redshift",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:redshift:region:account-id:cluster:cluster-name",
"Severity": "medium",
"ResourceType": "AwsRedshiftCluster",
"Description": "Check if Redshift cluster has audit logging enabled",
"Risk": "If logs are not enabled; monitoring of service use and threat analysis is not possible.",
"RelatedUrl": "https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html",
"Remediation": {
"Code": {
"CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Redshift/redshift-cluster-audit-logging-enabled.html",
"NativeIaC": "https://docs.bridgecrew.io/docs/bc_aws_logging_12#cloudformation",
"Other": "https://docs.bridgecrew.io/docs/bc_aws_logging_12#aws-console",
"Terraform": "https://docs.bridgecrew.io/docs/bc_aws_logging_12#terraform"
},
"Recommendation": {
"Text": "Enable logs. Create an S3 lifecycle policy. Define use cases, metrics and automated responses where applicable.",
"Url": "https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html"
}
},
"Categories": [],
"Tags": {
"Tag1Key": "value",
"Tag2Key": "value"
},
"DependsOn": [],
"RelatedTo": [],
"Notes": "",
"Compliance": []
}

View File

@@ -0,0 +1,25 @@
from lib.check.models import Check, Check_Report
from providers.aws.services.redshift.redshift_client import redshift_client
class redshift_cluster_audit_logging(Check):
def execute(self):
findings = []
for cluster in redshift_client.clusters:
report = Check_Report(self.metadata)
report.region = cluster.region
report.resource_id = cluster.id
report.resource_arn = cluster.arn
report.status = "PASS"
report.status_extended = (
f"Redshift Cluster {cluster.arn} has audit logging enabled"
)
if not cluster.logging_enabled:
report.status = "FAIL"
report.status_extended = (
f"Redshift Cluster {cluster.arn} has audit logging disabled"
)
findings.append(report)
return findings

View File

@@ -0,0 +1,78 @@
from re import search
from unittest import mock
from uuid import uuid4
from providers.aws.services.redshift.redshift_service import Cluster
AWS_REGION = "eu-west-1"
AWS_ACCOUNT_NUMBER = "123456789012"
cluster_id = str(uuid4())
class Test_redshift_cluster_audit_logging:
def test_no_clusters(self):
redshift_client = mock.MagicMock
redshift_client.clusters = []
with mock.patch(
"providers.aws.services.redshift.redshift_service.Redshift",
redshift_client,
):
from providers.aws.services.redshift.redshift_cluster_audit_logging.redshift_cluster_audit_logging import (
redshift_cluster_audit_logging,
)
check = redshift_cluster_audit_logging()
result = check.execute()
assert len(result) == 0
def test_cluster_is_not_audit_logging(self):
redshift_client = mock.MagicMock
redshift_client.clusters = []
redshift_client.clusters.append(
Cluster(
id=cluster_id,
region=AWS_REGION,
logging_enabled=False,
)
)
with mock.patch(
"providers.aws.services.redshift.redshift_service.Redshift",
redshift_client,
):
from providers.aws.services.redshift.redshift_cluster_audit_logging.redshift_cluster_audit_logging import (
redshift_cluster_audit_logging,
)
check = redshift_cluster_audit_logging()
result = check.execute()
assert result[0].status == "FAIL"
assert search("has audit logging disabled", result[0].status_extended)
assert result[0].resource_id == cluster_id
assert result[0].resource_arn == ""
def test_cluster_is_audit_logging(self):
redshift_client = mock.MagicMock
redshift_client.clusters = []
redshift_client.clusters.append(
Cluster(
id=cluster_id,
region=AWS_REGION,
logging_enabled=True,
endpoint_address="192.192.192.192",
)
)
with mock.patch(
"providers.aws.services.redshift.redshift_service.Redshift",
redshift_client,
):
from providers.aws.services.redshift.redshift_cluster_audit_logging.redshift_cluster_audit_logging import (
redshift_cluster_audit_logging,
)
check = redshift_cluster_audit_logging()
result = check.execute()
assert result[0].status == "PASS"
assert search("has audit logging enabled", result[0].status_extended)
assert result[0].resource_id == cluster_id
assert result[0].resource_arn == ""

View File

@@ -0,0 +1,35 @@
{
"Provider": "aws",
"CheckID": "redshift_cluster_automated_snapshot",
"CheckTitle": "Check if Redshift Clusters have automated snapshots enabled",
"CheckType": [],
"ServiceName": "redshift",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:redshift:region:account-id:cluster:cluster-name",
"Severity": "medium",
"ResourceType": "AwsRedshiftCluster",
"Description": "Check if Redshift Clusters have automated snapshots enabled",
"Risk": "If backup is not enabled, data is vulnerable. Human error or bad actors could erase or modify data.",
"RelatedUrl": "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/AWS_Redshift.html",
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "",
"Terraform": ""
},
"Recommendation": {
"Text": "Enable automated backup for production data. Define a retention period and periodically test backup restoration. A Disaster Recovery process should be in place to govern Data Protection approach",
"Url": "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/AWS_Redshift.html"
}
},
"Categories": [],
"Tags": {
"Tag1Key": "value",
"Tag2Key": "value"
},
"DependsOn": [],
"RelatedTo": [],
"Notes": "",
"Compliance": []
}

View File

@@ -0,0 +1,25 @@
from lib.check.models import Check, Check_Report
from providers.aws.services.redshift.redshift_client import redshift_client
class redshift_cluster_automated_snapshot(Check):
def execute(self):
findings = []
for cluster in redshift_client.clusters:
report = Check_Report(self.metadata)
report.region = cluster.region
report.resource_id = cluster.id
report.resource_arn = cluster.arn
report.status = "PASS"
report.status_extended = (
f"Redshift Cluster {cluster.arn} has automated snapshots"
)
if not cluster.cluster_snapshots:
report.status = "FAIL"
report.status_extended = (
f"Redshift Cluster {cluster.arn} has automated snapshots disabled"
)
findings.append(report)
return findings

View File

@@ -0,0 +1,77 @@
from re import search
from unittest import mock
from uuid import uuid4
from providers.aws.services.redshift.redshift_service import Cluster
AWS_REGION = "eu-west-1"
AWS_ACCOUNT_NUMBER = "123456789012"
cluster_id = str(uuid4())
class Test_redshift_cluster_automated_snapshot:
def test_no_clusters(self):
redshift_client = mock.MagicMock
redshift_client.clusters = []
with mock.patch(
"providers.aws.services.redshift.redshift_service.Redshift",
redshift_client,
):
from providers.aws.services.redshift.redshift_cluster_automated_snapshot.redshift_cluster_automated_snapshot import (
redshift_cluster_automated_snapshot,
)
check = redshift_cluster_automated_snapshot()
result = check.execute()
assert len(result) == 0
def test_cluster_is_not_performing_snapshots(self):
redshift_client = mock.MagicMock
redshift_client.clusters = []
redshift_client.clusters.append(
Cluster(
id=cluster_id,
region=AWS_REGION,
cluster_snapshots=False,
)
)
with mock.patch(
"providers.aws.services.redshift.redshift_service.Redshift",
redshift_client,
):
from providers.aws.services.redshift.redshift_cluster_automated_snapshot.redshift_cluster_automated_snapshot import (
redshift_cluster_automated_snapshot,
)
check = redshift_cluster_automated_snapshot()
result = check.execute()
assert result[0].status == "FAIL"
assert search("has automated snapshots disabled", result[0].status_extended)
assert result[0].resource_id == cluster_id
assert result[0].resource_arn == ""
def test_cluster_is_performing_snapshots(self):
redshift_client = mock.MagicMock
redshift_client.clusters = []
redshift_client.clusters.append(
Cluster(
id=cluster_id,
region=AWS_REGION,
cluster_snapshots=True,
)
)
with mock.patch(
"providers.aws.services.redshift.redshift_service.Redshift",
redshift_client,
):
from providers.aws.services.redshift.redshift_cluster_automated_snapshot.redshift_cluster_automated_snapshot import (
redshift_cluster_automated_snapshot,
)
check = redshift_cluster_automated_snapshot()
result = check.execute()
assert result[0].status == "PASS"
assert search("has automated snapshots", result[0].status_extended)
assert result[0].resource_id == cluster_id
assert result[0].resource_arn == ""

View File

@@ -0,0 +1,35 @@
{
"Provider": "aws",
"CheckID": "redshift_cluster_automatic_upgrades",
"CheckTitle": "Check for Publicly Accessible Redshift Clusters",
"CheckType": [],
"ServiceName": "redshift",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:redshift:region:account-id:cluster:cluster-name",
"Severity": "high",
"ResourceType": "AwsRedshiftCluster",
"Description": "Check for Publicly Accessible Redshift Clusters",
"Risk": "Without automatic version upgrade enabled; a critical Redshift Cluster version can become severly out of date",
"RelatedUrl": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html",
"Remediation": {
"Code": {
"CLI": "aws redshift modify-cluster --cluster-identifier <cluster_id> --allow-version-upgrade",
"NativeIaC": "https://docs.bridgecrew.io/docs/public_9#cloudformation",
"Other": "",
"Terraform": "https://docs.bridgecrew.io/docs/ensure-that-redshift-clusters-allow-version-upgrade-by-default#terraform"
},
"Recommendation": {
"Text": "Enabled AutomaticVersionUpgrade on Redshift Cluster",
"Url": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html"
}
},
"Categories": [],
"Tags": {
"Tag1Key": "value",
"Tag2Key": "value"
},
"DependsOn": [],
"RelatedTo": [],
"Notes": "",
"Compliance": []
}

View File

@@ -0,0 +1,25 @@
from lib.check.models import Check, Check_Report
from providers.aws.services.redshift.redshift_client import redshift_client
class redshift_cluster_automatic_upgrades(Check):
def execute(self):
findings = []
for cluster in redshift_client.clusters:
report = Check_Report(self.metadata)
report.region = cluster.region
report.resource_id = cluster.id
report.resource_arn = cluster.arn
report.status = "PASS"
report.status_extended = (
f"Redshift Cluster {cluster.arn} has AllowVersionUpgrade enabled"
)
if not cluster.allow_version_upgrade:
report.status = "FAIL"
report.status_extended = (
f"Redshift Cluster {cluster.arn} has AllowVersionUpgrade disabled"
)
findings.append(report)
return findings

View File

@@ -0,0 +1,73 @@
from re import search
from unittest import mock
from uuid import uuid4
from providers.aws.services.redshift.redshift_service import Cluster
AWS_REGION = "eu-west-1"
AWS_ACCOUNT_NUMBER = "123456789012"
cluster_id = str(uuid4())
class Test_redshift_cluster_automatic_upgrades:
def test_no_clusters(self):
redshift_client = mock.MagicMock
redshift_client.clusters = []
with mock.patch(
"providers.aws.services.redshift.redshift_service.Redshift",
redshift_client,
):
from providers.aws.services.redshift.redshift_cluster_automatic_upgrades.redshift_cluster_automatic_upgrades import (
redshift_cluster_automatic_upgrades,
)
check = redshift_cluster_automatic_upgrades()
result = check.execute()
assert len(result) == 0
def test_cluster_not_automatic_upgrades(self):
redshift_client = mock.MagicMock
redshift_client.clusters = []
redshift_client.clusters.append(
Cluster(
id=cluster_id,
region=AWS_REGION,
allow_version_upgrade=False,
)
)
with mock.patch(
"providers.aws.services.redshift.redshift_service.Redshift",
redshift_client,
):
from providers.aws.services.redshift.redshift_cluster_automatic_upgrades.redshift_cluster_automatic_upgrades import (
redshift_cluster_automatic_upgrades,
)
check = redshift_cluster_automatic_upgrades()
result = check.execute()
assert result[0].status == "FAIL"
assert search("has AllowVersionUpgrade disabled", result[0].status_extended)
assert result[0].resource_id == cluster_id
assert result[0].resource_arn == ""
def test_cluster_automatic_upgrades(self):
redshift_client = mock.MagicMock
redshift_client.clusters = []
redshift_client.clusters.append(
Cluster(id=cluster_id, region=AWS_REGION, allow_version_upgrade=True)
)
with mock.patch(
"providers.aws.services.redshift.redshift_service.Redshift",
redshift_client,
):
from providers.aws.services.redshift.redshift_cluster_automatic_upgrades.redshift_cluster_automatic_upgrades import (
redshift_cluster_automatic_upgrades,
)
check = redshift_cluster_automatic_upgrades()
result = check.execute()
assert result[0].status == "PASS"
assert search("has AllowVersionUpgrade enabled", result[0].status_extended)
assert result[0].resource_id == cluster_id
assert result[0].resource_arn == ""

View File

@@ -0,0 +1,35 @@
{
"Provider": "aws",
"CheckID": "redshift_cluster_public_access",
"CheckTitle": "Check for Publicly Accessible Redshift Clusters",
"CheckType": [],
"ServiceName": "redshift",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:redshift:region:account-id:cluster:cluster-name",
"Severity": "high",
"ResourceType": "AwsRedshiftCluster",
"Description": "Check for Publicly Accessible Redshift Clusters",
"Risk": "Publicly accessible services could expose sensitive data to bad actors.",
"RelatedUrl": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html",
"Remediation": {
"Code": {
"CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Redshift/redshift-cluster-publicly-accessible.html",
"NativeIaC": "https://docs.bridgecrew.io/docs/public_9#cloudformation",
"Other": "https://docs.bridgecrew.io/docs/public_9#aws-console",
"Terraform": "https://docs.bridgecrew.io/docs/public_9#terraform"
},
"Recommendation": {
"Text": "List all shared Redshift clusters and make sure there is a business reason for them.",
"Url": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html"
}
},
"Categories": [],
"Tags": {
"Tag1Key": "value",
"Tag2Key": "value"
},
"DependsOn": [],
"RelatedTo": [],
"Notes": "",
"Compliance": []
}

View File

@@ -0,0 +1,23 @@
from lib.check.models import Check, Check_Report
from providers.aws.services.redshift.redshift_client import redshift_client
class redshift_cluster_public_access(Check):
def execute(self):
findings = []
for cluster in redshift_client.clusters:
report = Check_Report(self.metadata)
report.region = cluster.region
report.resource_id = cluster.id
report.resource_arn = cluster.arn
report.status = "PASS"
report.status_extended = (
f"Redshift Cluster {cluster.arn} is not publicly accessible"
)
if cluster.endpoint_address and cluster.public_access:
report.status = "FAIL"
report.status_extended = f"Redshift Cluster {cluster.arn} is publicly accessible at endpoint {cluster.endpoint_address}"
findings.append(report)
return findings
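Note that the FAIL branch requires both conditions: the cluster must be flagged PubliclyAccessible and must expose an endpoint address. A short sketch of that decision using the Cluster model from this PR (illustrative values):

from providers.aws.services.redshift.redshift_service import Cluster

# Publicly accessible flag set, but no endpoint address: the check above reports PASS.
cluster = Cluster(id="example-cluster", region="eu-west-1", public_access=True)
print(bool(cluster.endpoint_address and cluster.public_access))  # False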

View File

@@ -0,0 +1,104 @@
from re import search
from unittest import mock
from uuid import uuid4
from providers.aws.services.redshift.redshift_service import Cluster
AWS_REGION = "eu-west-1"
AWS_ACCOUNT_NUMBER = "123456789012"
cluster_id = str(uuid4())
class Test_redshift_cluster_public_access:
def test_no_clusters(self):
redshift_client = mock.MagicMock
redshift_client.clusters = []
with mock.patch(
"providers.aws.services.redshift.redshift_service.Redshift",
redshift_client,
):
from providers.aws.services.redshift.redshift_cluster_public_access.redshift_cluster_public_access import (
redshift_cluster_public_access,
)
check = redshift_cluster_public_access()
result = check.execute()
assert len(result) == 0
def test_cluster_is_public(self):
redshift_client = mock.MagicMock
redshift_client.clusters = []
redshift_client.clusters.append(
Cluster(
id=cluster_id,
region=AWS_REGION,
public_access=True,
endpoint_address="192.192.192.192",
)
)
with mock.patch(
"providers.aws.services.redshift.redshift_service.Redshift",
redshift_client,
):
from providers.aws.services.redshift.redshift_cluster_public_access.redshift_cluster_public_access import (
redshift_cluster_public_access,
)
check = redshift_cluster_public_access()
result = check.execute()
assert result[0].status == "FAIL"
assert search("is publicly accessible", result[0].status_extended)
assert result[0].resource_id == cluster_id
assert result[0].resource_arn == ""
def test_cluster_is_not_public1(self):
redshift_client = mock.MagicMock
redshift_client.clusters = []
redshift_client.clusters.append(
Cluster(
id=cluster_id,
region=AWS_REGION,
public_access=False,
endpoint_address="192.192.192.192",
)
)
with mock.patch(
"providers.aws.services.redshift.redshift_service.Redshift",
redshift_client,
):
from providers.aws.services.redshift.redshift_cluster_public_access.redshift_cluster_public_access import (
redshift_cluster_public_access,
)
check = redshift_cluster_public_access()
result = check.execute()
assert result[0].status == "PASS"
assert search("is not publicly accessible", result[0].status_extended)
assert result[0].resource_id == cluster_id
assert result[0].resource_arn == ""
def test_cluster_is_not_public2(self):
redshift_client = mock.MagicMock
redshift_client.clusters = []
redshift_client.clusters.append(
Cluster(
id=cluster_id,
region=AWS_REGION,
public_access=True,
)
)
with mock.patch(
"providers.aws.services.redshift.redshift_service.Redshift",
redshift_client,
):
from providers.aws.services.redshift.redshift_cluster_public_access.redshift_cluster_public_access import (
redshift_cluster_public_access,
)
check = redshift_cluster_public_access()
result = check.execute()
assert result[0].status == "PASS"
assert search("is not publicly accessible", result[0].status_extended)
assert result[0].resource_id == cluster_id
assert result[0].resource_arn == ""

View File

@@ -0,0 +1,109 @@
import threading
from pydantic import BaseModel
from lib.logger import logger
from providers.aws.aws_provider import generate_regional_clients
################################ Redshift
class Redshift:
def __init__(self, audit_info):
self.service = "redshift"
self.session = audit_info.audit_session
self.regional_clients = generate_regional_clients(self.service, audit_info)
self.clusters = []
self.__threading_call__(self.__describe_clusters__)
self.__describe_logging_status__(self.regional_clients)
self.__describe_cluster_snapshots__(self.regional_clients)
def __get_session__(self):
return self.session
def __threading_call__(self, call):
threads = []
for regional_client in self.regional_clients.values():
threads.append(threading.Thread(target=call, args=(regional_client,)))
for t in threads:
t.start()
for t in threads:
t.join()
def __describe_clusters__(self, regional_client):
logger.info("Redshift - describing clusters...")
try:
list_clusters_paginator = regional_client.get_paginator("describe_clusters")
for page in list_clusters_paginator.paginate():
for cluster in page["Clusters"]:
cluster_to_append = Cluster(
id=cluster["ClusterIdentifier"],
region=regional_client.region,
)
if (
"PubliclyAccessible" in cluster
and cluster["PubliclyAccessible"]
):
cluster_to_append.public_access = True
if "Endpoint" in cluster and "Address" in cluster["Endpoint"]:
cluster_to_append.endpoint_address = cluster["Endpoint"][
"Address"
]
if (
"AllowVersionUpgrade" in cluster
and cluster["AllowVersionUpgrade"]
):
cluster_to_append.allow_version_upgrade = True
self.clusters.append(cluster_to_append)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
def __describe_logging_status__(self, regional_clients):
logger.info("Redshift - describing logging status...")
try:
for cluster in self.clusters:
regional_client = regional_clients[cluster.region]
cluster_attributes = regional_client.describe_logging_status(
ClusterIdentifier=cluster.id
)
if (
"LoggingEnabled" in cluster_attributes
and cluster_attributes["LoggingEnabled"]
):
cluster.logging_enabled = True
if "BucketName" in cluster_attributes:
cluster.bucket = cluster_attributes["BucketName"]
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
def __describe_cluster_snapshots__(self, regional_clients):
logger.info("Redshift - describing logging status...")
try:
for cluster in self.clusters:
regional_client = regional_clients[cluster.region]
cluster_snapshots = regional_client.describe_cluster_snapshots(
ClusterIdentifier=cluster.id
)
if "Snapshots" in cluster_snapshots and cluster_snapshots["Snapshots"]:
cluster.cluster_snapshots = True
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
class Cluster(BaseModel):
id: str
arn: str = ""
region: str
public_access: bool = None
endpoint_address: str = None
allow_version_upgrade: bool = None
logging_enabled: bool = None
bucket: str = None
cluster_snapshots: bool = None
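Every optional Cluster field defaults to None (or an empty string for arn), so a check can safely read these attributes even when the corresponding describe call failed or returned nothing. A minimal example of those defaults, constructed the same way the unit tests do:

from providers.aws.services.redshift.redshift_service import Cluster

minimal = Cluster(id="example-cluster", region="eu-west-1")
print(minimal.arn)                # ""
print(minimal.logging_enabled)    # None
print(minimal.cluster_snapshots)  # None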

View File

@@ -0,0 +1,187 @@
from unittest.mock import patch
from uuid import uuid4
import botocore
from boto3 import client, session
from moto import mock_redshift
from providers.aws.lib.audit_info.models import AWS_Audit_Info
from providers.aws.services.redshift.redshift_service import Redshift
AWS_ACCOUNT_NUMBER = 123456789012
AWS_REGION = "eu-west-1"
topic_name = "test-topic"
test_policy = {
"Statement": [
{
"Effect": "Allow",
"Principal": {"AWS": f"{AWS_ACCOUNT_NUMBER}"},
"Action": ["redshift:Publish"],
"Resource": f"arn:aws:redshift:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:{topic_name}",
}
]
}
test_bucket_name = "test-bucket"
cluster_id = str(uuid4())
make_api_call = botocore.client.BaseClient._make_api_call
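# DescribeLoggingStatus and DescribeClusterSnapshots are intercepted below, presumably because
# the mocked Redshift backend does not populate logging status or snapshot data in the shape the
# service expects; every other operation falls through to the original botocore call.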
def mock_make_api_call(self, operation_name, kwarg):
if operation_name == "DescribeLoggingStatus":
return {
"LoggingEnabled": True,
"BucketName": test_bucket_name,
}
if operation_name == "DescribeClusterSnapshots":
return {
"Snapshots": [
{
"SnapshotIdentifier": uuid4(),
},
]
}
return make_api_call(self, operation_name, kwarg)
def mock_generate_regional_clients(service, audit_info):
regional_client = audit_info.audit_session.client(service, region_name=AWS_REGION)
regional_client.region = AWS_REGION
return {AWS_REGION: regional_client}
@patch("botocore.client.BaseClient._make_api_call", new=mock_make_api_call)
@patch(
"providers.aws.services.redshift.redshift_service.generate_regional_clients",
new=mock_generate_regional_clients,
)
class Test_Redshift_Service:
# Mocked Audit Info
def set_mocked_audit_info(self):
audit_info = AWS_Audit_Info(
original_session=None,
audit_session=session.Session(
profile_name=None,
botocore_session=None,
),
audited_account=AWS_ACCOUNT_NUMBER,
audited_user_id=None,
audited_partition="aws",
audited_identity_arn=None,
profile=None,
profile_region=None,
credentials=None,
assumed_role_info=None,
audited_regions=None,
organizations_metadata=None,
)
return audit_info
# Test Redshift Service
def test_service(self):
audit_info = self.set_mocked_audit_info()
redshift = Redshift(audit_info)
assert redshift.service == "redshift"
# Test Redshift client
def test_client(self):
audit_info = self.set_mocked_audit_info()
redshift = Redshift(audit_info)
for reg_client in redshift.regional_clients.values():
assert reg_client.__class__.__name__ == "Redshift"
# Test Redshift session
def test__get_session__(self):
audit_info = self.set_mocked_audit_info()
redshift = Redshift(audit_info)
assert redshift.session.__class__.__name__ == "Session"
@mock_redshift
def test_describe_clusters(self):
redshift_client = client("redshift", region_name=AWS_REGION)
response = redshift_client.create_cluster(
DBName="test",
ClusterIdentifier=cluster_id,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="user",
MasterUserPassword="password",
PubliclyAccessible=True,
)
audit_info = self.set_mocked_audit_info()
redshift = Redshift(audit_info)
assert len(redshift.clusters) == 1
assert redshift.clusters[0].id == cluster_id
assert redshift.clusters[0].region == AWS_REGION
assert redshift.clusters[0].public_access
assert (
redshift.clusters[0].endpoint_address
== response["Cluster"]["Endpoint"]["Address"]
)
assert (
redshift.clusters[0].allow_version_upgrade
== response["Cluster"]["AllowVersionUpgrade"]
)
@mock_redshift
def test_describe_logging_status(self):
redshift_client = client("redshift", region_name=AWS_REGION)
response = redshift_client.create_cluster(
DBName="test",
ClusterIdentifier=cluster_id,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="user",
MasterUserPassword="password",
PubliclyAccessible=True,
)
audit_info = self.set_mocked_audit_info()
redshift = Redshift(audit_info)
assert len(redshift.clusters) == 1
assert redshift.clusters[0].id == cluster_id
assert redshift.clusters[0].region == AWS_REGION
assert redshift.clusters[0].public_access
assert (
redshift.clusters[0].endpoint_address
== response["Cluster"]["Endpoint"]["Address"]
)
assert (
redshift.clusters[0].allow_version_upgrade
== response["Cluster"]["AllowVersionUpgrade"]
)
assert redshift.clusters[0].logging_enabled
assert redshift.clusters[0].bucket == test_bucket_name
@mock_redshift
def test_describe_cluster_snapshots(self):
redshift_client = client("redshift", region_name=AWS_REGION)
response = redshift_client.create_cluster(
DBName="test",
ClusterIdentifier=cluster_id,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="user",
MasterUserPassword="password",
PubliclyAccessible=True,
)
audit_info = self.set_mocked_audit_info()
redshift = Redshift(audit_info)
assert len(redshift.clusters) == 1
assert redshift.clusters[0].id == cluster_id
assert redshift.clusters[0].region == AWS_REGION
assert redshift.clusters[0].public_access
assert (
redshift.clusters[0].endpoint_address
== response["Cluster"]["Endpoint"]["Address"]
)
assert (
redshift.clusters[0].allow_version_upgrade
== response["Cluster"]["AllowVersionUpgrade"]
)
assert redshift.clusters[0].logging_enabled
assert redshift.clusters[0].bucket == test_bucket_name
assert redshift.clusters[0].cluster_snapshots