From 67b5de205b46bc089cb15de64264229e4652c868 Mon Sep 17 00:00:00 2001 From: Nacho Rivera <59198746+n4ch04@users.noreply.github.com> Date: Thu, 17 Nov 2022 11:50:13 +0100 Subject: [PATCH] feat(): EKS service and checks (#1479) Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com> Co-authored-by: sergargar --- providers/aws/services/eks/__init__.py | 0 providers/aws/services/eks/check_extra794 | 52 ------ providers/aws/services/eks/check_extra795 | 49 ------ providers/aws/services/eks/check_extra796 | 54 ------ providers/aws/services/eks/check_extra797 | 47 ------ providers/aws/services/eks/eks_client.py | 4 + .../__init__.py | 0 ...ncryption_in_secrets_enabled.metadata.json | 35 ++++ ...r_kms_cmk_encryption_in_secrets_enabled.py | 25 +++ ..._cmk_encryption_in_secrets_enabled_test.py | 87 ++++++++++ .../__init__.py | 0 ...e_endpoint_access_restricted.metadata.json | 35 ++++ ...ontrol_plane_endpoint_access_restricted.py | 25 +++ ...l_plane_endpoint_access_restricted_test.py | 129 ++++++++++++++ .../__init__.py | 0 ...ne_logging_all_types_enabled.metadata.json | 35 ++++ ...control_plane_logging_all_types_enabled.py | 34 ++++ ...ol_plane_logging_all_types_enabled_test.py | 132 +++++++++++++++ .../__init__.py | 0 ...ints_not_publicly_accessible.metadata.json | 35 ++++ .../eks_endpoints_not_publicly_accessible.py | 24 +++ ..._endpoints_not_publicly_accessible_test.py | 93 +++++++++++ providers/aws/services/eks/eks_service.py | 108 ++++++++++++ .../aws/services/eks/eks_service_test.py | 158 ++++++++++++++++++ 24 files changed, 959 insertions(+), 202 deletions(-) create mode 100644 providers/aws/services/eks/__init__.py delete mode 100644 providers/aws/services/eks/check_extra794 delete mode 100644 providers/aws/services/eks/check_extra795 delete mode 100644 providers/aws/services/eks/check_extra796 delete mode 100644 providers/aws/services/eks/check_extra797 create mode 100644 providers/aws/services/eks/eks_client.py create mode 100644 
providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/__init__.py create mode 100644 providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.metadata.json create mode 100644 providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.py create mode 100644 providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled_test.py create mode 100644 providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/__init__.py create mode 100644 providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.metadata.json create mode 100644 providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.py create mode 100644 providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted_test.py create mode 100644 providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/__init__.py create mode 100644 providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.metadata.json create mode 100644 providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.py create mode 100644 providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled_test.py create mode 100644 providers/aws/services/eks/eks_endpoints_not_publicly_accessible/__init__.py create mode 100644 providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible.metadata.json create mode 100644 providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible.py create mode 100644 
providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible_test.py create mode 100644 providers/aws/services/eks/eks_service.py create mode 100644 providers/aws/services/eks/eks_service_test.py diff --git a/providers/aws/services/eks/__init__.py b/providers/aws/services/eks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/eks/check_extra794 b/providers/aws/services/eks/check_extra794 deleted file mode 100644 index ef2b5c50..00000000 --- a/providers/aws/services/eks/check_extra794 +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env bash - -# Prowler - the handy cloud security tool (copyright 2019) by Toni de la Fuente -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -CHECK_ID_extra794="7.94" -CHECK_TITLE_extra794="[extra794] Ensure EKS Control Plane Audit Logging is enabled for all log types" -CHECK_SCORED_extra794="NOT_SCORED" -CHECK_CIS_LEVEL_extra794="EXTRA" -CHECK_SEVERITY_extra794="Medium" -CHECK_ASFF_RESOURCE_TYPE_extra794="AwsEksCluster" -CHECK_ALTERNATE_check794="extra794" -CHECK_SERVICENAME_extra794="eks" -CHECK_RISK_extra794='If logs are not enabled; monitoring of service use and threat analysis is not possible.' -CHECK_REMEDIATION_extra794='Make sure you logging for EKS control plane is enabled.' 
-CHECK_DOC_extra794='https://docs.aws.amazon.com/eks/latest/userguide/logging-monitoring.html' -CHECK_CAF_EPIC_extra794='Logging and Monitoring' - -extra794(){ - for regx in $REGIONS; do - CLUSTERS=$($AWSCLI eks list-clusters $PROFILE_OPT --region $regx --query 'clusters[]' --output text 2>&1) - if [[ $(echo "$CLUSTERS" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError') ]]; then - textInfo "$regx: Access Denied trying to list EKS clusters" "$regx" - continue - fi - if [[ $CLUSTERS ]]; then - for CLUSTER in $CLUSTERS;do - CLUSTERDEF=$($AWSCLI eks describe-cluster $PROFILE_OPT --region $regx --name $CLUSTER --query 'cluster.logging.clusterLogging[0]') - LOGGING_ENABLED=$(echo $CLUSTERDEF | jq -r '.enabled') - TYPES=$(echo $CLUSTERDEF | jq -r '.types[]') - if [[ $LOGGING_ENABLED == "true" ]]; then - if [[ $(echo $TYPES | egrep "api.*audit.*authenticator.*controllerManager.*scheduler") ]]; then - textPass "$regx: Control plane logging enabled and correctly configured for EKS cluster $CLUSTER" "$regx" "$CLUSTER" - else - textFail "$regx: Control plane logging enabled; not all log types collected for EKS cluster $CLUSTER" "$regx" "$CLUSTER" - fi - else - textFail "$regx: Control plane logging is not enabled for EKS cluster $CLUSTER" "$regx" "$CLUSTER" - fi - done - else - textInfo "$regx: No EKS clusters found" "$regx" - fi - done -} diff --git a/providers/aws/services/eks/check_extra795 b/providers/aws/services/eks/check_extra795 deleted file mode 100644 index fa2398a3..00000000 --- a/providers/aws/services/eks/check_extra795 +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash - -# Prowler - the handy cloud security tool (copyright 2019) by Toni de la Fuente -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. 
You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -CHECK_ID_extra795="7.95" -CHECK_TITLE_extra795="[extra795] Ensure EKS Clusters are created with Private Endpoint Enabled and Public Access Disabled" -CHECK_SCORED_extra795="NOT_SCORED" -CHECK_CIS_LEVEL_extra795="EXTRA" -CHECK_SEVERITY_extra795="High" -CHECK_ASFF_RESOURCE_TYPE_extra795="AwsEksCluster" -CHECK_ALTERNATE_check795="extra795" -CHECK_SERVICENAME_extra795="eks" -CHECK_RISK_extra795='Publicly accessible services could expose sensitive data to bad actors.' -CHECK_REMEDIATION_extra795='Enable private access to the Kubernetes API server so that all communication between your nodes and the API server stays within your VPC. Disable internet access to the API server.' 
-CHECK_DOC_extra795='https://docs.aws.amazon.com/eks/latest/userguide/infrastructure-security.html' -CHECK_CAF_EPIC_extra795='Infrastructure Security' - -extra795(){ - for regx in $REGIONS; do - CLUSTERS=$($AWSCLI eks list-clusters $PROFILE_OPT --region $regx --query 'clusters[]' --output text 2>&1) - if [[ $(echo "$CLUSTERS" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError') ]]; then - textInfo "$regx: Access Denied trying to list EKS clusters" "$regx" - continue - fi - if [[ $CLUSTERS ]]; then - for CLUSTER in $CLUSTERS;do - CLUSTERDEF=$($AWSCLI eks describe-cluster $PROFILE_OPT --region $regx --name $CLUSTER --query 'cluster.resourcesVpcConfig') - PUB_ENABLED=$(echo $CLUSTERDEF | jq -r '.endpointPublicAccess') - PRIV_ENABLED=$(echo $CLUSTERDEF | jq -r '.endpointPrivateAccess') - - if [[ $PUB_ENABLED == "false" ]] && [[ $PRIV_ENABLED == "true" ]] ; then - textPass "$regx: Cluster endpoint access is private for EKS cluster $CLUSTER" "$regx" "$CLUSTER" - else - textFail "$regx: Cluster endpoint access is public for EKS cluster $CLUSTER" "$regx" "$CLUSTER" - fi - done - else - textInfo "$regx: No EKS clusters found" "$regx" - fi - done -} diff --git a/providers/aws/services/eks/check_extra796 b/providers/aws/services/eks/check_extra796 deleted file mode 100644 index 70611d9e..00000000 --- a/providers/aws/services/eks/check_extra796 +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash - -# Prowler - the handy cloud security tool (copyright 2019) by Toni de la Fuente -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations under the License. -CHECK_ID_extra796="7.96" -CHECK_TITLE_extra796="[extra796] Restrict Access to the EKS Control Plane Endpoint" -CHECK_SCORED_extra796="NOT_SCORED" -CHECK_CIS_LEVEL_extra796="EXTRA" -CHECK_SEVERITY_extra796="High" -CHECK_ASFF_RESOURCE_TYPE_extra796="AwsEksCluster" -CHECK_ALTERNATE_check796="extra796" -CHECK_SERVICENAME_extra796="eks" -CHECK_RISK_extra796='By default; this API server endpoint is public to the internet; and access to the API server is secured using a combination of AWS Identity and Access Management (IAM) and native Kubernetes Role Based Access Control (RBAC).' -CHECK_REMEDIATION_extra796='You should enable private access to the Kubernetes API server so that all communication between your nodes and the API server stays within your VPC. You can limit the IP addresses that can access your API server from the internet; or completely disable internet access to the API server.' -CHECK_DOC_extra796='https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html' -CHECK_CAF_EPIC_extra796='Infrastructure Security' - -extra796(){ - for regx in $REGIONS; do - CLUSTERS=$($AWSCLI eks list-clusters $PROFILE_OPT --region $regx --query 'clusters[]' --output text 2>&1) - if [[ $(echo "$CLUSTERS" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError') ]]; then - textInfo "$regx: Access Denied trying to list EKS clusters" "$regx" - continue - fi - if [[ $CLUSTERS ]]; then - for CLUSTER in $CLUSTERS;do - CLUSTERDEF=$($AWSCLI eks describe-cluster $PROFILE_OPT --region $regx --name $CLUSTER --query 'cluster.resourcesVpcConfig') - PUB_ENABLED=$(echo $CLUSTERDEF | jq -r '.endpointPublicAccess') - PRIV_ENABLED=$(echo $CLUSTERDEF | jq -r '.endpointPrivateAccess') - PUB_ACCESS_CIDRS=$(echo $CLUSTERDEF | jq -r '.publicAccessCidrs') - - if [[ $PUB_ENABLED == "false" ]] && [[ $PRIV_ENABLED == "true" ]] ; then - textPass "$regx: Cluster endpoint access is 
private for EKS cluster $CLUSTER" "$regx" - else - if [[ $(echo $PUB_ACCESS_CIDRS | grep "0.0.0.0/0") ]] ; then - textFail "$regx: Cluster control plane access is not restricted for EKS cluster $CLUSTER" "$regx" "$CLUSTER" - else - textPass "$regx: Cluster control plane access is restricted for EKS cluster $CLUSTER" "$regx" "$CLUSTER" - fi - fi - done - else - textInfo "$regx: No EKS clusters found" "$regx" - fi - done -} diff --git a/providers/aws/services/eks/check_extra797 b/providers/aws/services/eks/check_extra797 deleted file mode 100644 index beb28f76..00000000 --- a/providers/aws/services/eks/check_extra797 +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -# Prowler - the handy cloud security tool (copyright 2019) by Toni de la Fuente -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -CHECK_ID_extra797="7.97" -CHECK_TITLE_extra797="[extra797] Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs)" -CHECK_SCORED_extra797="NOT_SCORED" -CHECK_CIS_LEVEL_extra797="EXTRA" -CHECK_SEVERITY_extra797="Medium" -CHECK_ASFF_RESOURCE_TYPE_extra797="AwsEksCluster" -CHECK_ALTERNATE_check797="extra797" -CHECK_SERVICENAME_extra797="eks" -CHECK_RISK_extra797='Implementing envelope encryption is considered a security best practice for applications that store sensitive data and is part of a defense in depth security strategy.' 
-CHECK_REMEDIATION_extra797=' Setup your own Customer Master Key (CMK) in KMS and link this key by providing the CMK ARN when you create an EKS cluster.' -CHECK_DOC_extra797='https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html' -CHECK_CAF_EPIC_extra797='Data Protection' - -extra797(){ - for regx in $REGIONS; do - CLUSTERS=$($AWSCLI eks list-clusters $PROFILE_OPT --region $regx --query 'clusters[]' --output text 2>&1) - if [[ $(echo "$CLUSTERS" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError') ]]; then - textInfo "$regx: Access Denied trying to list EKS clusters" "$regx" - continue - fi - if [[ $CLUSTERS ]]; then - for CLUSTER in $CLUSTERS;do - ENC_CONFIG=$($AWSCLI eks describe-cluster $PROFILE_OPT --region $regx --name $CLUSTER --query 'cluster.encryptionConfig') - - if [[ $ENC_CONFIG == "null" ]]; then - textFail "$regx: Encryption for Kubernetes secrets is not configured for EKS cluster $CLUSTER" "$regx" "$CLUSTER" - else - textPass "$regx: Encryption for Kubernetes secrets is configured for EKS cluster $CLUSTER" "$regx" "$CLUSTER" - fi - done - else - textInfo "$regx: No EKS clusters found" "$regx" - fi - done -} diff --git a/providers/aws/services/eks/eks_client.py b/providers/aws/services/eks/eks_client.py new file mode 100644 index 00000000..dd01bd35 --- /dev/null +++ b/providers/aws/services/eks/eks_client.py @@ -0,0 +1,4 @@ +from providers.aws.lib.audit_info.audit_info import current_audit_info +from providers.aws.services.eks.eks_service import EKS + +eks_client = EKS(current_audit_info) diff --git a/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/__init__.py b/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.metadata.json 
b/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.metadata.json new file mode 100644 index 00000000..5758d038 --- /dev/null +++ b/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "aws", + "CheckID": "eks_cluster_kms_cmk_encryption_in_secrets_enabled", + "CheckTitle": "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs)", + "CheckType": ["Protect" , "Data protection"], + "ServiceName": "eks", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsEksCluster", + "Description": "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs)", + "Risk": "Implementing envelope encryption is considered a security best practice for applications that store sensitive data and is part of a defense in depth security strategy.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/EKS/enable-envelope-encryption.html", + "NativeIaC": "https://docs.bridgecrew.io/docs/bc_aws_kubernetes_3#fix---builtime", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Setup your own Customer Master Key (CMK) in KMS and link this key by providing the CMK ARN when you create an EKS cluster.", + "Url": "https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "", + "Compliance": [] + } diff --git a/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.py 
b/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.py new file mode 100644 index 00000000..633def6d --- /dev/null +++ b/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.py @@ -0,0 +1,25 @@ +from lib.check.models import Check, Check_Report +from providers.aws.services.eks.eks_client import eks_client + + +class eks_cluster_kms_cmk_encryption_in_secrets_enabled(Check): + def execute(self): + findings = [] + for cluster in eks_client.clusters: + report = Check_Report(self.metadata) + report.region = cluster.region + report.resource_id = cluster.name + report.resource_arn = cluster.arn + report.status = "FAIL" + report.status_extended = ( + f"EKS cluster {cluster.name} has not encryption for Kubernetes secrets." + ) + if cluster.encryptionConfig: + report.status = "PASS" + report.status_extended = ( + f"EKS cluster {cluster.name} has encryption for Kubernetes secrets." 
+ ) + + findings.append(report) + + return findings diff --git a/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled_test.py b/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled_test.py new file mode 100644 index 00000000..396e9273 --- /dev/null +++ b/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled_test.py @@ -0,0 +1,87 @@ +from re import search +from unittest import mock + +from providers.aws.services.eks.eks_service import EKSCluster + +AWS_REGION = "eu-west-1" +AWS_ACCOUNT_NUMBER = "123456789012" + +cluster_name = "cluster_test" +cluster_arn = f"arn:aws:eks:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:cluster/{cluster_name}" + + +class Test_eks_cluster_kms_cmk_encryption_in_secrets_enabled: + def test_no_clusters(self): + eks_client = mock.MagicMock + eks_client.clusters = [] + with mock.patch( + "providers.aws.services.eks.eks_service.EKS", + eks_client, + ): + from providers.aws.services.eks.eks_cluster_kms_cmk_encryption_in_secrets_enabled.eks_cluster_kms_cmk_encryption_in_secrets_enabled import ( + eks_cluster_kms_cmk_encryption_in_secrets_enabled, + ) + + check = eks_cluster_kms_cmk_encryption_in_secrets_enabled() + result = check.execute() + assert len(result) == 0 + + def test_not_secrets_encryption(self): + eks_client = mock.MagicMock + eks_client.clusters = [] + eks_client.clusters.append( + EKSCluster( + name=cluster_name, + arn=cluster_arn, + region=AWS_REGION, + encryptionConfig=False, + ) + ) + + with mock.patch( + "providers.aws.services.eks.eks_service.EKS", + eks_client, + ): + from providers.aws.services.eks.eks_cluster_kms_cmk_encryption_in_secrets_enabled.eks_cluster_kms_cmk_encryption_in_secrets_enabled import ( + eks_cluster_kms_cmk_encryption_in_secrets_enabled, + ) + + check = 
eks_cluster_kms_cmk_encryption_in_secrets_enabled() + result = check.execute() + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "has not encryption for Kubernetes secrets", result[0].status_extended + ) + assert result[0].resource_id == cluster_name + assert result[0].resource_arn == cluster_arn + + def test_secrets_encryption(self): + eks_client = mock.MagicMock + eks_client.clusters = [] + eks_client.clusters.append( + EKSCluster( + name=cluster_name, + arn=cluster_arn, + region=AWS_REGION, + encryptionConfig=True, + ) + ) + + with mock.patch( + "providers.aws.services.eks.eks_service.EKS", + eks_client, + ): + from providers.aws.services.eks.eks_cluster_kms_cmk_encryption_in_secrets_enabled.eks_cluster_kms_cmk_encryption_in_secrets_enabled import ( + eks_cluster_kms_cmk_encryption_in_secrets_enabled, + ) + + check = eks_cluster_kms_cmk_encryption_in_secrets_enabled() + result = check.execute() + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "has encryption for Kubernetes secrets", result[0].status_extended + ) + assert result[0].resource_id == cluster_name + assert result[0].resource_arn == cluster_arn diff --git a/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/__init__.py b/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.metadata.json b/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.metadata.json new file mode 100644 index 00000000..efac79bf --- /dev/null +++ b/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "aws", + "CheckID": "eks_control_plane_endpoint_access_restricted", + 
"CheckTitle": "Restrict Access to the EKS Control Plane Endpoint", + "CheckType": ["Infrastructure Security"], + "ServiceName": "eks", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsEksCluster", + "Description": "Restrict Access to the EKS Control Plane Endpoint", + "Risk": "By default; this API server endpoint is public to the internet; and access to the API server is secured using a combination of AWS Identity and Access Management (IAM) and native Kubernetes Role Based Access Control (RBAC).", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws eks update-cluster-config --region --name --resources-vpc-config endpointPublicAccess=false,endpointPrivateAccess=true,publicAccessCidrs=[\"123.123.123.123/32\"]", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "You should enable private access to the Kubernetes API server so that all communication between your nodes and the API server stays within your VPC. 
You can limit the IP addresses that can access your API server from the internet; or completely disable internet access to the API server.", + "Url": "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "", + "Compliance": [] + } diff --git a/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.py b/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.py new file mode 100644 index 00000000..62dbf6f4 --- /dev/null +++ b/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.py @@ -0,0 +1,25 @@ +from lib.check.models import Check, Check_Report +from providers.aws.services.eks.eks_client import eks_client + + +class eks_control_plane_endpoint_access_restricted(Check): + def execute(self): + findings = [] + for cluster in eks_client.clusters: + report = Check_Report(self.metadata) + report.region = cluster.region + report.resource_id = cluster.name + report.resource_arn = cluster.arn + report.status = "PASS" + report.status_extended = ( + f"Cluster endpoint access is private for EKS cluster {cluster.name}" + ) + if cluster.endpoint_public_access and not cluster.endpoint_private_access: + if "0.0.0.0/0" in cluster.public_access_cidrs: + report.status = "FAIL" + report.status_extended = f"Cluster control plane access is not restricted for EKS cluster {cluster.name}" + else: + report.status_extended = f"Cluster control plane access is restricted for EKS cluster {cluster.name}" + findings.append(report) + + return findings diff --git a/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted_test.py 
b/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted_test.py new file mode 100644 index 00000000..a3a40812 --- /dev/null +++ b/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted_test.py @@ -0,0 +1,129 @@ +from re import search +from unittest import mock + +from providers.aws.services.eks.eks_service import EKSCluster + +AWS_REGION = "eu-west-1" +AWS_ACCOUNT_NUMBER = "123456789012" + +cluster_name = "cluster_test" +cluster_arn = f"arn:aws:eks:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:cluster/{cluster_name}" + + +class Test_eks_control_plane_endpoint_access_restricted: + def test_no_clusters(self): + eks_client = mock.MagicMock + eks_client.clusters = [] + with mock.patch( + "providers.aws.services.eks.eks_service.EKS", + eks_client, + ): + from providers.aws.services.eks.eks_control_plane_endpoint_access_restricted.eks_control_plane_endpoint_access_restricted import ( + eks_control_plane_endpoint_access_restricted, + ) + + check = eks_control_plane_endpoint_access_restricted() + result = check.execute() + assert len(result) == 0 + + def test_control_plane_private(self): + eks_client = mock.MagicMock + eks_client.clusters = [] + eks_client.clusters.append( + EKSCluster( + name=cluster_name, + arn=cluster_arn, + region=AWS_REGION, + logging=None, + endpoint_public_access=False, + endpoint_private_access=True, + public_access_cidrs=["123.123.123.123/32"], + ) + ) + + with mock.patch( + "providers.aws.services.eks.eks_service.EKS", + eks_client, + ): + from providers.aws.services.eks.eks_control_plane_endpoint_access_restricted.eks_control_plane_endpoint_access_restricted import ( + eks_control_plane_endpoint_access_restricted, + ) + + check = eks_control_plane_endpoint_access_restricted() + result = check.execute() + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "Cluster endpoint access is private for EKS cluster", 
+ result[0].status_extended, + ) + assert result[0].resource_id == cluster_name + assert result[0].resource_arn == cluster_arn + + def test_control_plane_access_restricted(self): + eks_client = mock.MagicMock + eks_client.clusters = [] + eks_client.clusters.append( + EKSCluster( + name=cluster_name, + arn=cluster_arn, + region=AWS_REGION, + logging=None, + endpoint_public_access=True, + endpoint_private_access=False, + public_access_cidrs=["123.123.123.123/32"], + ) + ) + + with mock.patch( + "providers.aws.services.eks.eks_service.EKS", + eks_client, + ): + from providers.aws.services.eks.eks_control_plane_endpoint_access_restricted.eks_control_plane_endpoint_access_restricted import ( + eks_control_plane_endpoint_access_restricted, + ) + + check = eks_control_plane_endpoint_access_restricted() + result = check.execute() + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "Cluster control plane access is restricted for EKS cluster", + result[0].status_extended, + ) + assert result[0].resource_id == cluster_name + assert result[0].resource_arn == cluster_arn + + def test_control_plane_not_restricted(self): + eks_client = mock.MagicMock + eks_client.clusters = [] + eks_client.clusters.append( + EKSCluster( + name=cluster_name, + arn=cluster_arn, + region=AWS_REGION, + logging=None, + endpoint_public_access=True, + endpoint_private_access=False, + public_access_cidrs=["123.123.123.123/32", "0.0.0.0/0"], + ) + ) + + with mock.patch( + "providers.aws.services.eks.eks_service.EKS", + eks_client, + ): + from providers.aws.services.eks.eks_control_plane_endpoint_access_restricted.eks_control_plane_endpoint_access_restricted import ( + eks_control_plane_endpoint_access_restricted, + ) + + check = eks_control_plane_endpoint_access_restricted() + result = check.execute() + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "Cluster control plane access is not restricted for EKS cluster", + result[0].status_extended, + 
) + assert result[0].resource_id == cluster_name + assert result[0].resource_arn == cluster_arn diff --git a/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/__init__.py b/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.metadata.json b/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.metadata.json new file mode 100644 index 00000000..2419bf6d --- /dev/null +++ b/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "aws", + "CheckID": "eks_control_plane_logging_all_types_enabled", + "CheckTitle": "Ensure EKS Control Plane Audit Logging is enabled for all log types", + "CheckType": ["Logging and Monitoring"], + "ServiceName": "eks", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsEksCluster", + "Description": "Ensure EKS Control Plane Audit Logging is enabled for all log types", + "Risk": "If logs are not enabled; monitoring of service use and threat analysis is not possible.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws eks update-cluster-config --region --name --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'", + "NativeIaC": "", + "Other": "https://docs.bridgecrew.io/docs/bc_aws_kubernetes_4#aws-console", + "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_kubernetes_4#fix---buildtime" + }, + "Recommendation": { + "Text": "Make sure you logging for EKS control plane is enabled.", + "Url": 
from lib.check.models import Check, Check_Report
from providers.aws.services.eks.eks_client import eks_client

# Every control-plane log type that must be collected for the check to pass.
REQUIRED_LOG_TYPES = {
    "api",
    "audit",
    "authenticator",
    "controllerManager",
    "scheduler",
}


class eks_control_plane_logging_all_types_enabled(Check):
    """Report whether each EKS cluster ships all control-plane log types.

    PASS only when logging is enabled AND every type in REQUIRED_LOG_TYPES
    is collected; otherwise FAIL with a message describing what is missing.
    """

    def execute(self):
        findings = []
        for cluster in eks_client.clusters:
            report = Check_Report(self.metadata)
            report.region = cluster.region
            report.resource_id = cluster.name
            report.resource_arn = cluster.arn
            if not (cluster.logging and cluster.logging.enabled):
                # No logging configuration at all (or explicitly disabled).
                report.status = "FAIL"
                report.status_extended = (
                    f"Control plane logging is not enabled for EKS cluster {cluster.name}"
                )
            elif REQUIRED_LOG_TYPES.issubset(cluster.logging.types):
                report.status = "PASS"
                report.status_extended = f"Control plane logging enabled and correctly configured for EKS cluster {cluster.name}"
            else:
                # Logging is on, but at least one required type is missing.
                report.status = "FAIL"
                report.status_extended = f"Control plane logging enabled but not all log types collected for EKS cluster {cluster.name}"
            findings.append(report)

        return findings
from re import search
from unittest import mock

from providers.aws.services.eks.eks_service import EKSCluster, EKSClusterLoggingEntity

AWS_REGION = "eu-west-1"
AWS_ACCOUNT_NUMBER = "123456789012"

cluster_name = "cluster_test"
cluster_arn = f"arn:aws:eks:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:cluster/{cluster_name}"

# All log types the check requires; slicing off the last one yields an
# incomplete configuration for the negative test.
ALL_LOG_TYPES = ["api", "audit", "authenticator", "controllerManager", "scheduler"]


def _service_with(clusters):
    """Return the mocked EKS service exposing *clusters*.

    NOTE(review): the original tests assign the ``mock.MagicMock`` *class*
    (no call); instances produced by the patched ``EKS(...)`` then resolve
    ``clusters`` through the class attribute.  Preserved verbatim so the
    patching semantics stay identical — confirm this pattern is intended.
    """
    service = mock.MagicMock
    service.clusters = clusters
    return service


class Test_eks_control_plane_logging_all_types_enabled:
    def test_no_clusters(self):
        with mock.patch(
            "providers.aws.services.eks.eks_service.EKS",
            _service_with([]),
        ):
            from providers.aws.services.eks.eks_control_plane_logging_all_types_enabled.eks_control_plane_logging_all_types_enabled import (
                eks_control_plane_logging_all_types_enabled,
            )

            assert eks_control_plane_logging_all_types_enabled().execute() == []

    def test_control_plane_not_logging(self):
        cluster = EKSCluster(
            name=cluster_name,
            arn=cluster_arn,
            region=AWS_REGION,
            logging=None,
        )
        with mock.patch(
            "providers.aws.services.eks.eks_service.EKS",
            _service_with([cluster]),
        ):
            from providers.aws.services.eks.eks_control_plane_logging_all_types_enabled.eks_control_plane_logging_all_types_enabled import (
                eks_control_plane_logging_all_types_enabled,
            )

            result = eks_control_plane_logging_all_types_enabled().execute()
            assert len(result) == 1
            assert result[0].status == "FAIL"
            assert search(
                "Control plane logging is not enabled for EKS cluster",
                result[0].status_extended,
            )
            assert result[0].resource_id == cluster_name
            assert result[0].resource_arn == cluster_arn

    def test_control_plane_incomplete_logging(self):
        cluster = EKSCluster(
            name=cluster_name,
            arn=cluster_arn,
            region=AWS_REGION,
            # "scheduler" intentionally missing.
            logging=EKSClusterLoggingEntity(types=ALL_LOG_TYPES[:-1], enabled=True),
        )
        with mock.patch(
            "providers.aws.services.eks.eks_service.EKS",
            _service_with([cluster]),
        ):
            from providers.aws.services.eks.eks_control_plane_logging_all_types_enabled.eks_control_plane_logging_all_types_enabled import (
                eks_control_plane_logging_all_types_enabled,
            )

            result = eks_control_plane_logging_all_types_enabled().execute()
            assert len(result) == 1
            assert result[0].status == "FAIL"
            assert search(
                "Control plane logging enabled but not all log types collected",
                result[0].status_extended,
            )
            assert result[0].resource_id == cluster_name
            assert result[0].resource_arn == cluster_arn

    def test_control_plane_complete_logging(self):
        cluster = EKSCluster(
            name=cluster_name,
            arn=cluster_arn,
            region=AWS_REGION,
            logging=EKSClusterLoggingEntity(types=ALL_LOG_TYPES, enabled=True),
        )
        with mock.patch(
            "providers.aws.services.eks.eks_service.EKS",
            _service_with([cluster]),
        ):
            from providers.aws.services.eks.eks_control_plane_logging_all_types_enabled.eks_control_plane_logging_all_types_enabled import (
                eks_control_plane_logging_all_types_enabled,
            )

            result = eks_control_plane_logging_all_types_enabled().execute()
            assert len(result) == 1
            assert result[0].status == "PASS"
            assert search(
                "Control plane logging enabled and correctly configured",
                result[0].status_extended,
            )
            assert result[0].resource_id == cluster_name
            assert result[0].resource_arn == cluster_arn
b/providers/aws/services/eks/eks_endpoints_not_publicly_accessible/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible.metadata.json b/providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible.metadata.json new file mode 100644 index 00000000..ffd100d6 --- /dev/null +++ b/providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "aws", + "CheckID": "eks_endpoints_not_publicly_accessible", + "CheckTitle": "Ensure EKS Clusters are created with Private Endpoint Enabled and Public Access Disabled", + "CheckType": ["Protect", "Secure network configuration","Resources not publicly accessible"], + "ServiceName": "eks", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "high", + "ResourceType": "AwsEksCluster", + "Description": "Ensure EKS Clusters are created with Private Endpoint Enabled and Public Access Disabled", + "Risk": "Publicly accessible services could expose sensitive data to bad actors.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws eks update-cluster-config --region --name --resources-vpc-config endpointPublicAccess=false,endpointPrivateAccess=true,publicAccessCidrs=[\"123.123.123.123/32\"]", + "NativeIaC": "", + "Other": "https://github.com/cloudmatos/matos/tree/master/remediations/aws/eks/eks-disable-public-endpoint", + "Terraform": "" + }, + "Recommendation": { + "Text": "Enable private access to the Kubernetes API server so that all communication between your nodes and the API server stays within your VPC. 
from lib.check.models import Check, Check_Report
from providers.aws.services.eks.eks_client import eks_client


class eks_endpoints_not_publicly_accessible(Check):
    """Flag EKS clusters whose API endpoint is only reachable publicly.

    FAIL when the public endpoint is enabled while the private endpoint is
    disabled; PASS otherwise.
    """

    def execute(self):
        findings = []
        for cluster in eks_client.clusters:
            report = Check_Report(self.metadata)
            report.region = cluster.region
            report.resource_id = cluster.name
            report.resource_arn = cluster.arn
            # NOTE(review): a cluster with BOTH public and private access
            # enabled passes this check — confirm that is the intended
            # policy, since its endpoint is still internet-reachable.
            public_only = (
                cluster.endpoint_public_access and not cluster.endpoint_private_access
            )
            if public_only:
                report.status = "FAIL"
                report.status_extended = (
                    f"Cluster endpoint access is public for EKS cluster {cluster.name}"
                )
            else:
                report.status = "PASS"
                report.status_extended = (
                    f"Cluster endpoint access is private for EKS cluster {cluster.name}"
                )
            findings.append(report)

        return findings
from re import search
from unittest import mock

from providers.aws.services.eks.eks_service import EKSCluster

AWS_REGION = "eu-west-1"
AWS_ACCOUNT_NUMBER = "123456789012"

cluster_name = "cluster_test"
cluster_arn = f"arn:aws:eks:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:cluster/{cluster_name}"


def _service_with(clusters):
    """Return the mocked EKS service exposing *clusters*.

    NOTE(review): assigns the ``mock.MagicMock`` *class* (no call), exactly
    as the original tests do — instances from the patched ``EKS(...)``
    resolve ``clusters`` via the class attribute.
    """
    service = mock.MagicMock
    service.clusters = clusters
    return service


class Test_eks_endpoints_not_publicly_accessible:
    def test_no_clusters(self):
        with mock.patch(
            "providers.aws.services.eks.eks_service.EKS",
            _service_with([]),
        ):
            from providers.aws.services.eks.eks_endpoints_not_publicly_accessible.eks_endpoints_not_publicly_accessible import (
                eks_endpoints_not_publicly_accessible,
            )

            assert eks_endpoints_not_publicly_accessible().execute() == []

    def test_endpoint_public_access(self):
        cluster = EKSCluster(
            name=cluster_name,
            arn=cluster_arn,
            region=AWS_REGION,
            logging=None,
            endpoint_public_access=True,
            endpoint_private_access=False,
        )
        with mock.patch(
            "providers.aws.services.eks.eks_service.EKS",
            _service_with([cluster]),
        ):
            from providers.aws.services.eks.eks_endpoints_not_publicly_accessible.eks_endpoints_not_publicly_accessible import (
                eks_endpoints_not_publicly_accessible,
            )

            result = eks_endpoints_not_publicly_accessible().execute()
            assert len(result) == 1
            assert result[0].status == "FAIL"
            assert search(
                "Cluster endpoint access is public for EKS cluster",
                result[0].status_extended,
            )
            assert result[0].resource_id == cluster_name
            assert result[0].resource_arn == cluster_arn

    def test_endpoint_not_public_access(self):
        cluster = EKSCluster(
            name=cluster_name,
            arn=cluster_arn,
            region=AWS_REGION,
            logging=None,
            endpoint_public_access=False,
            endpoint_private_access=True,
        )
        with mock.patch(
            "providers.aws.services.eks.eks_service.EKS",
            _service_with([cluster]),
        ):
            from providers.aws.services.eks.eks_endpoints_not_publicly_accessible.eks_endpoints_not_publicly_accessible import (
                eks_endpoints_not_publicly_accessible,
            )

            result = eks_endpoints_not_publicly_accessible().execute()
            assert len(result) == 1
            assert result[0].status == "PASS"
            assert search(
                "Cluster endpoint access is private for EKS cluster",
                result[0].status_extended,
            )
            assert result[0].resource_id == cluster_name
            assert result[0].resource_arn == cluster_arn
import threading

from pydantic import BaseModel

from lib.logger import logger
from providers.aws.aws_provider import generate_regional_clients


################################ EKS
class EKS:
    """AWS EKS service wrapper.

    On construction, lists every cluster across all regional clients in
    parallel, then enriches each cluster with describe_cluster details
    (ARN, logging, endpoint access, encryption).
    """

    def __init__(self, audit_info):
        self.service = "eks"
        self.session = audit_info.audit_session
        self.regional_clients = generate_regional_clients(self.service, audit_info)
        self.clusters = []
        # One thread per region for listing; enrichment is sequential.
        self.__threading_call__(self.__list_clusters__)
        self.__describe_cluster__(self.regional_clients)

    def __get_session__(self):
        return self.session

    def __threading_call__(self, call):
        # Fan *call* out across every regional client and wait for all.
        threads = []
        for regional_client in self.regional_clients.values():
            threads.append(threading.Thread(target=call, args=(regional_client,)))
        for t in threads:
            t.start()
        for t in threads:
            t.join()

    def __list_clusters__(self, regional_client):
        """Append an EKSCluster stub (name + region) per cluster in the region."""
        logger.info("EKS listing clusters...")
        try:
            list_clusters_paginator = regional_client.get_paginator("list_clusters")
            for page in list_clusters_paginator.paginate():
                for cluster in page["clusters"]:
                    self.clusters.append(
                        EKSCluster(
                            name=cluster,
                            region=regional_client.region,
                        )
                    )

        except Exception as error:
            logger.error(
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __describe_cluster__(self, regional_clients):
        """Fill in ARN, logging, endpoint access and encryption per cluster."""
        # FIX: the original logged the copy-pasted "EKS listing clusters..."
        # message here.
        logger.info("EKS describing clusters...")
        for cluster in self.clusters:
            # Per-cluster try/except: one failing cluster no longer aborts
            # enrichment of the remaining ones, and the error message can
            # always name the region (the original referenced
            # regional_client, which is unbound if the first lookup raises).
            try:
                regional_client = regional_clients[cluster.region]
                describe_cluster = regional_client.describe_cluster(name=cluster.name)
                cluster_info = describe_cluster["cluster"]
                cluster.arn = cluster_info["arn"]
                if "logging" in cluster_info:
                    # Only the first clusterLogging entry is inspected,
                    # matching the original behavior.
                    logging_entry = cluster_info["logging"]["clusterLogging"][0]
                    cluster.logging = EKSClusterLoggingEntity(
                        types=logging_entry["types"],
                        enabled=logging_entry["enabled"],
                    )
                vpc_config = cluster_info["resourcesVpcConfig"]
                if "endpointPublicAccess" in vpc_config:
                    cluster.endpoint_public_access = vpc_config["endpointPublicAccess"]
                if "endpointPrivateAccess" in vpc_config:
                    cluster.endpoint_private_access = vpc_config[
                        "endpointPrivateAccess"
                    ]
                if "publicAccessCidrs" in vpc_config:
                    cluster.public_access_cidrs = vpc_config["publicAccessCidrs"]
                if "encryptionConfig" in cluster_info:
                    cluster.encryptionConfig = True

            except Exception as error:
                logger.error(
                    f"{cluster.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                )


class EKSClusterLoggingEntity(BaseModel):
    # Control-plane log types collected (e.g. "api", "audit", ...).
    types: list[str] = None
    enabled: bool = None


class EKSCluster(BaseModel):
    name: str
    # Populated by __describe_cluster__; None until then.
    arn: str = None
    region: str
    logging: EKSClusterLoggingEntity = None
    endpoint_public_access: bool = None
    endpoint_private_access: bool = None
    public_access_cidrs: list[str] = None
    # True when any encryptionConfig entry is present on the cluster.
    encryptionConfig: bool = None
from unittest.mock import patch

from boto3 import client, session
from moto import mock_ec2, mock_eks

from providers.aws.lib.audit_info.models import AWS_Audit_Info
from providers.aws.services.eks.eks_service import EKS

AWS_ACCOUNT_NUMBER = 123456789012
AWS_REGION = "eu-west-1"

cluster_name = "test"
cidr_block_vpc = "10.0.0.0/16"
cidr_block_subnet_1 = "10.0.0.0/22"
cidr_block_subnet_2 = "10.0.4.0/22"


def mock_generate_regional_clients(service, audit_info):
    """Limit the service under test to a single regional boto3 client."""
    regional_client = audit_info.audit_session.client(service, region_name=AWS_REGION)
    regional_client.region = AWS_REGION
    return {AWS_REGION: regional_client}


def _create_network():
    """Create a moto-backed VPC with two subnets; return the subnet ids."""
    ec2_client = client("ec2", region_name=AWS_REGION)
    vpc_id = ec2_client.create_vpc(CidrBlock=cidr_block_vpc)["Vpc"]["VpcId"]
    return [
        ec2_client.create_subnet(VpcId=vpc_id, CidrBlock=block)["Subnet"]["SubnetId"]
        for block in (cidr_block_subnet_1, cidr_block_subnet_2)
    ]


@patch(
    "providers.aws.services.eks.eks_service.generate_regional_clients",
    new=mock_generate_regional_clients,
)
class Test_EKS_Service:
    # Mocked Audit Info
    def set_mocked_audit_info(self):
        return AWS_Audit_Info(
            original_session=None,
            audit_session=session.Session(
                profile_name=None,
                botocore_session=None,
            ),
            audited_account=AWS_ACCOUNT_NUMBER,
            audited_user_id=None,
            audited_partition="aws",
            audited_identity_arn=None,
            profile=None,
            profile_region=None,
            credentials=None,
            assumed_role_info=None,
            audited_regions=None,
            organizations_metadata=None,
        )

    # Test EKS Service
    def test_service(self):
        eks = EKS(self.set_mocked_audit_info())
        assert eks.service == "eks"

    # Test EKS client
    def test_client(self):
        eks = EKS(self.set_mocked_audit_info())
        assert all(
            regional.__class__.__name__ == "EKS"
            for regional in eks.regional_clients.values()
        )

    # Test EKS session
    def test__get_session__(self):
        eks = EKS(self.set_mocked_audit_info())
        assert eks.session.__class__.__name__ == "Session"

    # Test EKS list clusters
    @mock_ec2
    @mock_eks
    def test__list_clusters(self):
        subnet_ids = _create_network()
        eks_client = client("eks", region_name=AWS_REGION)
        eks_client.create_cluster(
            version="1.10",
            name=cluster_name,
            clientRequestToken="1d2129a1-3d38-460a-9756-e5b91fddb951",
            resourcesVpcConfig={"subnetIds": subnet_ids},
            roleArn=f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:role/eks-service-role-AWSServiceRoleForAmazonEKS-J7ONKE3BQ4PI",
        )
        eks = EKS(self.set_mocked_audit_info())
        assert len(eks.clusters) == 1
        assert eks.clusters[0].name == cluster_name
        assert eks.clusters[0].region == AWS_REGION

    # Test EKS describe clusters
    @mock_ec2
    @mock_eks
    def test__describe_clusters(self):
        subnet_ids = _create_network()
        eks_client = client("eks", region_name=AWS_REGION)
        cluster = eks_client.create_cluster(
            version="1.10",
            name=cluster_name,
            clientRequestToken="1d2129a1-3d38-460a-9756-e5b91fddb951",
            resourcesVpcConfig={
                "subnetIds": subnet_ids,
                "endpointPublicAccess": True,
                "endpointPrivateAccess": True,
                "publicAccessCidrs": [
                    "0.0.0.0/0",
                ],
            },
            logging={
                "clusterLogging": [
                    {
                        "types": [
                            "api",
                        ],
                        "enabled": True,
                    },
                ]
            },
            roleArn=f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:role/eks-service-role-AWSServiceRoleForAmazonEKS-J7ONKE3BQ4PI",
            encryptionConfig=[
                {
                    "resources": [
                        "secrets",
                    ],
                },
            ],
        )
        eks = EKS(self.set_mocked_audit_info())
        assert len(eks.clusters) == 1
        found = eks.clusters[0]
        assert found.name == cluster_name
        assert found.region == AWS_REGION
        assert found.arn == cluster["cluster"]["arn"]
        assert found.logging.types == ["api"]
        assert found.logging.enabled
        assert found.endpoint_public_access
        assert found.endpoint_private_access
        assert found.public_access_cidrs == ["0.0.0.0/0"]
        assert found.encryptionConfig